如何得到所有学习数据的损失函数的总和
太难受了吧
对每个小数据进行批量学习
import numpy as np
# 实现从训练数据中随机选择指定个数的数据的代码
import os, sys
sys.path.append(os.pardir)
from dataset.mnist import load_mnist # 表示从dataset目录中的mnist.py文件中导入想要的东西
# Load MNIST with pixel values normalized to [0, 1] and one-hot encoded labels.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
# Report the sizes of the training images and their labels.
for split in (x_train, t_train):
    print(split.shape)
D:\ANACONDA\envs\pytorch\python.exe C:/Users/Administrator/Desktop/DeepLearning/ch04/wgw_test.py
(60000, 784)
(60000, 10)
Process finished with exit code 0
使用numpy中的random函数来帮个忙
# Draw a random mini-batch of samples from the training data.
train_size = x_train.shape[0]  # number of training examples (60000 for MNIST)
batch_size = 10
# np.random.choice(n, k) returns an ndarray of k random indices in [0, n).
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
# BUG FIX: the label batch must be indexed from t_train, not x_train,
# so each input stays paired with its own label.
t_batch = t_train[batch_mask]
相当于随机抽取了一组索引（mask）来选出小批量样本——注意这并不是设置随机数种子，np.random.choice 只是做随机采样

和电视台的收视率一样
监督数据（即标签）是用来和网络的输出作对比、计算损失的，训练就是据此评价并修正模型
import numpy as np
# Randomly select a fixed number of samples (a mini-batch) from the training data.
import os, sys
sys.path.append(os.pardir)  # make the parent directory importable
from dataset.mnist import load_mnist  # MNIST loader from dataset/mnist.py
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
# Shapes: x_train is (60000, 784), t_train is (60000, 10).
# print(x_train.shape)
# print(t_train.shape)
train_size = x_train.shape[0]
batch_size = 10
# np.random.choice(n, k) returns an ndarray of k random indices in [0, n).
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
# BUG FIX: the label batch must be indexed from t_train, not x_train,
# so each input stays paired with its own label.
t_batch = t_train[batch_mask]