qq_53216250 · 2024-03-31 21:57
t-SNE visualization of a CNN model (python, cnn)

The t-SNE visualization code is as follows:
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 11:55:08 2021
@author: 1
"""
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
from sklearn.manifold import TSNE
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from scikeras.wrappers import KerasClassifier
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.optimizers import SGD
from keras.layers import Dense, LSTM, Activation, Flatten, Convolution1D, Dropout, MaxPooling1D, BatchNormalization
from keras.models import load_model
from sklearn import preprocessing
# Load the data
df = pd.read_csv(r'/root/autodl-tmp/376data3.csv')
X = np.expand_dims(df.values[:, 0:1024].astype(float), axis=2)
Y = df.values[:, 1024]
# Encode the humidity classes as integers
# Split into training and test sets (K holds the training labels, y the test labels)
X_train, X_test, K, y = train_test_split(X, Y, test_size=0.3, random_state=0)
K = K
encoder = LabelEncoder()
Y_encoded1 = encoder.fit_transform(K)
Y_train = to_categorical(Y_encoded1)
Y_encoded2 = encoder.fit_transform(y)
Y_test = to_categorical(Y_encoded2)
# Define the neural network
def baseline_model():
    model = Sequential()
    model.add(Convolution1D(16, 64, strides=16, padding='same', input_shape=(1024, 1), activation='relu'))  # first convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Convolution1D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # second convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # third convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # fourth convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # fifth convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(
        BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
                           beta_constraint=None, gamma_constraint=None))
    model.add(Dense(100, activation='relu'))
    model.add(LSTM(64, return_sequences=True))
    model.add(Dropout(0.5))
    model.add(LSTM(32))
    model.add(Flatten())
    model.add(Dense(758, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
# Train the classifier
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=128, verbose=1)
history = estimator.fit(X_train, Y_train, validation_data=(X_test, Y_test))
import matplotlib.pyplot as plt
print(estimator.model)
# Visualize the convolutional network
def visual(model, data, num_layer=1):
    layer = keras.backend.function([model.layers[0].input], [model.layers[num_layer].output])
    f1 = layer([data])[0]
    np.set_printoptions(threshold=np.inf)
    print(f1.shape)
    print(f1)
    f2 = f1.reshape(6034, 64)
    print(f2)
    num = f1.shape[-1]
    print(num)
    plt.figure(figsize=(6, 12), dpi=150)
    for i in range(num):
        plt.subplot(np.ceil(np.sqrt(num)), np.ceil(np.sqrt(num)), i + 1)
        plt.imshow(f1[:, :, i] * 255, cmap='prism')
        plt.axis('off')
    plt.show()
def get_data():
    # digits = datasets.load_digits(n_class=10)
    digits = 2
    data = f2        # digits.data    # image features
    label = K        # digits.target  # image labels
    n_samples = 6034
    n_features = 64  # data.shape     # shape of the dataset
    return data, label, n_samples, n_features
# Preprocess the samples and plot them
def plot_embedding(data, label, title):
    x_min, x_max = np.min(data, 0), np.max(data, 0)
    data = (data - x_min) / (x_max - x_min)  # normalize the data
    fig = plt.figure()      # create the figure
    ax = plt.subplot(111)   # create the subplot
    # Iterate over all samples
    for i in range(data.shape[0]):
        # Draw each data point's label in the figure
        plt.text(data[i, 0], data[i, 1], str(label[i]), color=plt.cm.Set1(label[i] / 10),
                 fontdict={'weight': 'bold', 'size': 7})
    plt.xticks()  # set the axis ticks
    plt.yticks()
    plt.title(title, fontsize=14)
    # Return the figure
    return fig
data, label, n_samples, n_features = get_data()  # fetch the dataset information
print('Starting compute t-SNE Embedding...')
ts = TSNE(n_components=2, init='pca', random_state=0)
# t-SNE dimensionality reduction
result = ts.fit_transform(data)
# Plot the embedding
fig = plot_embedding(result, label, 't-SNE Embedding of digits')
# Show the figure
plt.show()
# Visualize the convolutional layer
visual(estimator.model, X_train, 20)
The error is as follows:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_15584/378268790.py in
111 # Train the classifier
112 estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=128, verbose=1)
--> 113 history = estimator.fit(X_train, Y_train, validation_data=(X_test, Y_test))
114 import matplotlib.pyplot as plt
115
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in fit(self, X, y, sample_weight, **kwargs)
1489 sample_weight = 1 if sample_weight is None else sample_weight
1490 sample_weight *= compute_sample_weight(class_weight=self.class_weight, y=y)
-> 1491 super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
1492 return self
1493
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in fit(self, X, y, sample_weight, **kwargs)
758 kwargs["initial_epoch"] = kwargs.get("initial_epoch", 0)
759
--> 760 self._fit(
761 X=X,
762 y=y,
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in _fit(self, X, y, sample_weight, warm_start, epochs, initial_epoch, **kwargs)
926 self._check_model_compatibility(y)
927
--> 928 self._fit_keras_model(
929 X,
930 y,
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in _fit_keras_model(self, X, y, sample_weight, warm_start, epochs, initial_epoch, **kwargs)
522 hist = self.model_.fit(x=X, y=y, **fit_args)
523 else:
--> 524 hist = self.model_.fit(x=X, y=y, **fit_args)
525
526 if not warm_start or not hasattr(self, "history_") or initial_epoch == 0:
~/miniconda3/lib/python3.8/site-packages/keras/src/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
~/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py in tf__test_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1972, in test_function *
return step_function(self, iterator)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1956, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1944, in run_step **
outputs = model.test_step(data)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1852, in test_step
self.compute_loss(x, y, y_pred, sample_weight)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1139, in compute_loss
return self.compiled_loss(
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 142, in __call__
losses = call_fn(y_true, y_pred)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 268, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 2122, in categorical_crossentropy
return backend.categorical_crossentropy(
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/backend.py", line 5560, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 434) and (None, 758) are incompatible
3 answers:
This answer cites GPT-3.5 and was written up by blogger GISer Liu:

Based on your code, you want to use t-SNE to visualize the output of the convolutional layers of your CNN model. However, there are some problems in your code, mainly in the visualization part. Below I explain how to modify the code so the visualization runs correctly:

- Problem in the convolutional-network visualization function visual: the layer function should be built from model.layers[num_layer].output rather than model.layers[num_layer], because keras.backend.function expects lists of input and output tensors, not a single layer object.
- Problem in the data-fetching function get_data: data should be the feature tensor extracted from the convolutional layer, not the previously defined f2. Since f2 is defined inside the visual function, it cannot be used inside get_data; the computation of f2 needs to move into get_data.
- Problem in the main program: you should call the visual function to do the visualization rather than applying TSNE directly. t-SNE only reduces the high-dimensional features to two dimensions for plotting; it does not by itself visualize the convolutional-layer output.

Here is the modified code:
def visual(model, data, num_layer=1):
    layer = keras.backend.function([model.layers[0].input], [model.layers[num_layer].output])
    f1 = layer([data])[0]
    np.set_printoptions(threshold=np.inf)
    print(f1.shape)
    print(f1)
    num = f1.shape[-1]
    print(num)
    plt.figure(figsize=(6, 12), dpi=150)
    for i in range(num):
        plt.subplot(np.ceil(np.sqrt(num)), np.ceil(np.sqrt(num)), i + 1)
        plt.imshow(f1[:, :, i] * 255, cmap='prism')
        plt.axis('off')
    plt.show()

def get_data(model, X_train):
    layer_output = keras.backend.function([model.layers[0].input], [model.layers[1].output])
    data = layer_output([X_train])[0]
    label = Y_train
    n_samples = data.shape[0]
    n_features = data.shape[1]
    return data, label, n_samples, n_features

model = baseline_model()
data, label, n_samples, n_features = get_data(model, X_train)

# Pass the correct arguments and run the visualization
visual(model, data, num_layer=1)
In this modified code, I assume the first convolutional layer is the one you want to visualize and pass num_layer=1. If you need to visualize the output of another layer, just change the value of num_layer.
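If you also want the t-SNE view that your original script was aiming for, the extracted features can then be passed to TSNE. Below is a minimal sketch (an illustration, not tested on your data) that assumes data and label come from get_data above and reuses the plot_embedding helper from your question:

from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt

# Sketch: flatten each sample's feature map into a 1-D vector before t-SNE
features = data.reshape(data.shape[0], -1)

ts = TSNE(n_components=2, init='pca', random_state=0)
embedding = ts.fit_transform(features)

# plot_embedding expects one integer label per sample, so convert the one-hot
# label matrix back to class indices before plotting
fig = plot_embedding(embedding, np.argmax(label, axis=1), 't-SNE embedding of CNN features')
plt.show()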
Also, make sure you have correctly imported the required libraries, such as keras, numpy, pandas, and so on.
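Separately, the ValueError at the end of your traceback (Shapes (None, 434) and (None, 758) are incompatible) suggests that the hard-coded Dense(758) output layer does not match the number of classes actually present in the one-hot encoded labels, which can happen when the LabelEncoder is fitted separately on the training and test labels. One possible way to keep them consistent is sketched below (an assumption on my part, not part of your original pipeline), fitting the encoder once on the full label column Y and sizing the softmax layer from it:

from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical

# Sketch: fit the encoder once on all labels so train and test share the same class set
encoder = LabelEncoder()
Y_int = encoder.fit_transform(Y)
num_classes = len(encoder.classes_)

X_train, X_test, y_train_int, y_test_int = train_test_split(X, Y_int, test_size=0.3, random_state=0)
Y_train = to_categorical(y_train_int, num_classes=num_classes)
Y_test = to_categorical(y_test_int, num_classes=num_classes)

# ...and inside baseline_model() the output layer would then become:
# model.add(Dense(num_classes, activation='softmax'))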
If this answer solved your problem, please accept it! If not, please revise according to the approach in the attached image.
[image attachment from the original answer not available]
[Related recommendations]
If you have already solved this problem, it would be great if you could write up the solution as a blog post and share the link in the comments, to help more people ^-^