目标战术意图由一系列动作实现,因此目标状态呈现时序动态变化特征。针对目标意图识别问题的这一特点,本文给出基于循环神经网络的目标意图识别实现方法。现有的复杂环境下目标意图识别方法主要有模板匹配、证据推理、贝叶斯网络和神经网络等。
下载链接:https://download.csdn.net/download/DeepLearning_/87232407
下载链接:https://download.csdn.net/download/DeepLearning_/87232463
下载链接:https://download.csdn.net/download/DeepLearning_/87232539
下载链接:https://download.csdn.net/download/DeepLearning_/87232571
提示:以下是本篇文章正文内容,下面案例可供参考
#!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
import keras.backend as K
from keras.callbacks import ReduceLROnPlateau
from keras.layers import Multiply
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.layers.recurrent import GRU
from keras.layers.recurrent import SimpleRNN
from keras.layers import Bidirectional
from keras.models import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from collections import Counter
import pandas as pd
import numpy as np
import os
import time
import random
import math
代码如下(示例):
def attention_3d_block(inputs,SINGLE_ATTENTION_VECTOR):
    """Apply soft attention over the time axis of a recurrent output.

    inputs: tensor of shape (batch, TIME_STEPS, input_dim).
    SINGLE_ATTENTION_VECTOR: if truthy, average the per-feature weights so
        every feature dimension shares one attention vector.
    Returns `inputs` multiplied element-wise by the attention weights.
    """
    feature_dim = int(inputs.shape[2])
    # Transpose to (batch, input_dim, TIME_STEPS) so the Dense layer below
    # emits one softmax weight per time step, per feature.
    transposed = Permute((2, 1))(inputs)
    # Shape is already (input_dim, TIME_STEPS); Reshape kept for explicitness.
    transposed = Reshape((feature_dim, TIME_STEPS))(transposed)
    weights = Dense(TIME_STEPS, activation='softmax')(transposed)
    if SINGLE_ATTENTION_VECTOR:
        # Collapse the feature axis, then repeat: a single shared vector.
        weights = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(weights)
        weights = RepeatVector(feature_dim)(weights)
    attention_probs = Permute((2, 1), name='attention_vec')(weights)
    return Multiply()([inputs, attention_probs])
代码如下(示例):
def modelAttentionAfterGRU():
    """Build a GRU model with attention applied after the recurrent layer.

    Returns an uncompiled Keras Model mapping (TIME_STEPS, INPUT_DIM)
    sequences to a 3-way softmax (one unit per intent class).
    """
    K.clear_session()  # drop any previously built graph/session state
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    units = 128  # was undefined (NameError); 128 matches modelAttentionBiLSTM
    GRU_out = GRU(units, return_sequences=True)(inputs)
    # Original flattened an undefined `attention_mul`; apply the attention
    # block to the GRU output first (no shared attention vector).
    attention_mul = attention_3d_block(GRU_out, False)
    attention_mul = Flatten()(attention_mul)
    output = Dense(3, activation='softmax')(attention_mul)
    model = Model(input=[inputs], output=output)
    return model
def modelAttentionBiLSTM():
    """Build a bidirectional-LSTM model with attention.

    Returns an uncompiled Keras Model mapping (TIME_STEPS, INPUT_DIM)
    sequences to a 3-way softmax (one unit per intent class).
    """
    K.clear_session()  # drop any previously built graph/session state
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    units = 128
    # Original never built the recurrent layer and flattened an undefined
    # `attention_mul`; construct the BiLSTM + attention stack explicitly.
    lstm_out = Bidirectional(LSTM(units, return_sequences=True))(inputs)
    attention_mul = attention_3d_block(lstm_out, False)
    attention_mul = Flatten()(attention_mul)
    # Fixed `activativa` typo; the Dense result no longer clobbers `inputs`.
    output = Dense(3, activation='softmax')(attention_mul)
    model = Model(input=[inputs], output=output)
    return model
def getData(data_length):
    """Load labelled trajectory XML files and build the training set.

    Walks ./SCENARIO_DATA/<dir>/<file>.xml (via the project helper
    `getDocumentList`), keeps tracks whose root tag is one of the three
    intent classes, and converts each track into per-window features:
    distance, acceleration and curvature, min-max normalised, padded to
    500 time steps.

    data_length: maximum number of raw track points to use per file.
    Returns (x_data, label): float ndarray of shape (N, 500, 3) and a
    list of int class labels (0 bombing / 1 anti-radiation / 2 escort).
    """
    x_data = []
    label = []
    specimen_size = 1500  # only the first 1500 raw track points are read
    # Min-max normalisation ranges for the three features.
    d_max = 8922; d_min = 0; d_gap = d_max - d_min
    c_max = 29; c_min = 0; c_gap = c_max - c_min
    # NOTE(review): a_min/a_gap were used below but never defined in the
    # original (NameError). The bounds here are placeholders — confirm
    # against the real acceleration distribution of the data.
    a_max = 100; a_min = -100; a_gap = a_max - a_min
    p = getDocumentList('./SCENARIO_DATA')
    for pp in p:
        q = getDocumentList('./SCENARIO_DATA/%s' % (pp))
        for qq in q:
            tree = ET.parse('./SCENARIO_DATA/%s/%s' % (pp, qq))
            root = tree.getroot()  # root node of the XML document
            if (root[0].tag == '轰炸' or root[0].tag == '反辐射' or root[0].tag == '护航'):
                x_signal = [[root[0][i].attrib[j] for j in ['x', 'y', 'z']]
                            for i in range(len(root[0]))
                            if root[0][i].tag == '数据' and i < specimen_size]
                if (len(x_signal) > 1000):  # skip tracks that are too short
                    x_signal = list(np.array(x_signal, dtype=float))
                    d0 = 0
                    x_feature = []
                    # Use a local bound instead of mutating the parameter:
                    # in the original, one short file permanently shrank
                    # `data_length` for every subsequent file.
                    n_points = min(data_length, len(x_signal))
                    for i in range(0, n_points - 5, 3):
                        # Distance travelled across the 5-segment window.
                        d = 0
                        for j in range(i, i + 5):
                            dx = math.pow(x_signal[j+1][0] - x_signal[j][0], 2)
                            dy = math.pow(x_signal[j+1][1] - x_signal[j][1], 2)
                            dz = math.pow(x_signal[j+1][2] - x_signal[j][2], 2)
                            d += math.sqrt(dx + dy + dz)
                        a = (d/15 - d0/15)/6
                        d0 = d  # kept for the next window's acceleration
                        # Fit a quadratic through three window points
                        # (triangle with vertices i, i+2, i+4).
                        len1 = math.sqrt(math.pow(x_signal[i+2][0]-x_signal[i][0], 2) + math.pow(x_signal[i+2][1]-x_signal[i][1], 2) + math.pow(x_signal[i+2][2]-x_signal[i][2], 2))
                        len2 = math.sqrt(math.pow(x_signal[i+2][0]-x_signal[i+4][0], 2) + math.pow(x_signal[i+2][1]-x_signal[i+4][1], 2) + math.pow(x_signal[i+2][2]-x_signal[i+4][2], 2))
                        # Fixed: the x-term used index i+2 instead of i, so
                        # len3 was not the side from vertex i to vertex i+4.
                        len3 = math.sqrt(math.pow(x_signal[i+4][0]-x_signal[i][0], 2) + math.pow(x_signal[i+4][1]-x_signal[i][1], 2) + math.pow(x_signal[i+4][2]-x_signal[i][2], 2))
                        if (len1 == 0 or len2 == 0 or len3 == 0):
                            c = 0
                        else:
                            half = (len1 + len2 + len3) / 2
                            # Degenerate (collinear) triangle -> zero curvature.
                            if (half < len1 or half < len2 or half < len3):
                                c = 0
                            else:
                                # Heron's formula: height over the base len3.
                                h = 2*math.sqrt(half*(half-len1)*(half-len2)*(half-len3))/len3
                                xh = math.sqrt(len1*len1 - h*h)
                                poly = np.polyfit([0, xh, len3], [1, h, 0], deg=2)
                                c = abs(poly[0])  # quadratic coefficient ~ curvature
                        # Min-max normalise the three features.
                        x_feature.append([(d-d_min)/d_gap, (a-a_min)/a_gap, (c-c_min)/c_gap])
                    # Zero-pad up to the fixed 500-step sequence length.
                    if len(x_feature) < 500:
                        for j in range(500 - len(x_feature)):
                            x_feature.append([0, 0, 0])
                    if root[0].tag == '轰炸':
                        label.append(0)
                        x_data.append(x_feature)
                    elif root[0].tag == '反辐射':
                        label.append(1)
                        x_data.append(x_feature)
                    elif root[0].tag == '护航':
                        label.append(2)
                        x_data.append(x_feature)
    x_data = np.array(x_data, dtype=float)
    return x_data, label
INPUT_DIM = 3 # features per time step (distance, acceleration, curvature built in getData)
TIME_STEPS = 500 # fixed sequence length fed to the recurrent models (getData pads to 500)
if __name__ == '__main__':
    # Build the dataset: (N, 500, 3) features and integer class labels.
    x_data, y_data = getData(1500)
    training_time = []
    weighted_error_rate = []
    # Halve-by-10 the LR when val_loss plateaus for 10 epochs, floor 1e-4.
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                                     verbose=1, min_lr=0.0001, patience=10)
    # 5 stratified train/test splits for mean/variance statistics.
    for i_test in range(5):
        x_train, x_test, y_train, y_test = train_test_split(
            x_data, y_data, test_size=0.2, random_state=i_test,
            shuffle=True, stratify=y_data)
        start = time.time()
        # Original left `m` unassigned (the model call was commented out,
        # causing a NameError); use the attention-GRU model defined above.
        m = modelAttentionAfterGRU()
        m.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
        m.summary()
        historyGRU = m.fit([x_train], y_train, epochs=200, batch_size=32,
                           validation_split=0.1, shuffle=True, callbacks=[reduce_lr])
        end = time.time()
        training_time.append(end - start)
        pred = m.predict(x_test)
        # `weighted_error_rate_i` was undefined in the original; derive it
        # from the confusion matrix: misclassified samples over total,
        # i.e. per-class error rates weighted by class support.
        y_pred = np.argmax(pred, axis=1)
        cm = confusion_matrix(y_test, y_pred)
        support = cm.sum(axis=1)
        weighted_error_rate_i = float(np.sum(support - np.diag(cm)) / np.sum(support))
        weighted_error_rate.append(weighted_error_rate_i)
    # Report mean training time and mean/variance of the error rate.
    print("training_time: %0.3f s" % (np.mean(training_time)))
    print("weighted_error_rate: %0.3f (± %0.3f)" % (np.mean(weighted_error_rate), np.var(weighted_error_rate)))
    # Visualise the training loss of the last run.
    lossGRU = historyGRU.history['loss']
    plt.plot(lossGRU, label='GRU')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese axis labels
    plt.rcParams['axes.unicode_minus'] = False
    plt.xlabel('训练步数')
    plt.ylabel('损失')
    plt.legend()
    plt.show()