This post is kept as a record; if anything in the code is off, feedback and discussion are welcome.
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.onnx
import torchvision
from torch.utils.data import DataLoader
from torchvision import models
# Default webcam capture (only relevant to the camera branch below, which replaces it with an RTSP stream).
camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
image_size = 28
batch_size = 15
epochs = 50
learning_rate = 0.00095
VGGMODEL = False   # True: fine-tune a pretrained VGG16 with a new head; False: train LeNet from scratch
train = 0          # 0: train, 1: evaluate, 3: camera preview, 4: export to ONNX
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
data_train = torchvision.datasets.MNIST(root='./', train=True, download=True, transform=transform)
data_test = torchvision.datasets.MNIST(root='./', train=False, download=True, transform=transform)
data_train = DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)
data_test = DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
print(len(data_train))  # number of training batches per epoch
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.drop_out = nn.Dropout()
        # two 2x2 poolings halve 28x28 twice -> 7x7 feature maps with 64 channels
        self.fc1 = nn.Linear(7 * 7 * 64, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = out.reshape(out.size(0), -1)
        out = self.drop_out(out)
        out = self.fc1(out)
        out = self.fc2(out)
        return out
class VGG16(nn.Module):
    def __init__(self):
        super(VGG16, self).__init__()
        # Pretrained VGG16 backbone (1000-way ImageNet logits) plus a small head mapping to 10 classes.
        # Note: vgg16 expects 3x224x224 inputs, so MNIST images would need resizing and channel
        # replication before this path can actually be used.
        self.vgg16 = models.vgg16(pretrained=True)
        self.classify = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5, inplace=False),
            nn.Linear(1000, 100),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5, inplace=False),
            nn.Linear(100, 10)
        )

    def forward(self, x):
        x = self.vgg16(x)
        x = self.classify(x)
        return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if VGGMODEL:
    model = VGG16().to(device)
    # freeze the pretrained backbone so only the new classification head is trained
    for param in model.vgg16.parameters():
        param.requires_grad = False
    print(model)
else:
    model = LeNet().to(device)
print(device)
print(torch.cuda.device_count())
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
train_loss = 0.0
num_batches = len(data_train)
if train == 0:
    for epoch in range(epochs):
        model.train()
        losses = 0.0
        train_loss = 0.0
        for iteration, (images, labels) in enumerate(data_train):
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = loss_function(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses += loss.item()
            train_loss += loss.item()
            if (iteration + 1) % 50 == 0:
                # report the average loss over the last 50 batches
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, epochs, iteration + 1, num_batches, losses / 50))
                losses = 0.0
    # save the trained weights so the evaluation and ONNX-export branches can load them
    torch.save(model.state_dict(), 'model.pkl')
elif train == 1:
    # evaluate the weights saved by the training branch
    model.load_state_dict(torch.load('model.pkl', map_location=device))
    model.eval()
    test_correct_num = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(data_test):
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            _, pred = torch.max(output, 1)
            test_correct_num += torch.sum(pred == target).item()
    print("Test: right_num: {}\t acc: {:.2f}%".format(
        test_correct_num, 100. * test_correct_num / len(data_test.dataset)))
elif train == 3:
    # preview frames from an RTSP camera stream (no inference is run in this branch)
    model.eval()
    camera = cv2.VideoCapture('rtsp://admin:12345@192.168.3.142:8554/live')
    with torch.no_grad():
        while camera.isOpened():
            ret, frame = camera.read()
            if not ret:
                break
            keypressed = cv2.waitKey(1)
            print('Key pressed:', keypressed)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 5)
            cv2.imshow('image', gray)
            if keypressed == 27:  # Esc quits
                break
        camera.release()
        cv2.destroyAllWindows()
elif train == 4:
    # export the trained LeNet to ONNX with a dynamic batch dimension
    model = LeNet().to(device)
    model.load_state_dict(torch.load('model.pkl', map_location=device))
    model.eval()
    x = torch.randn(1, 1, 28, 28, device=device)
    torch.onnx.export(
        model,
        x,
        "model.onnx",
        verbose=True,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={"input": {0: "batch_size"},
                      "output": {0: "batch_size"}}
    )
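Once the export has run, the resulting model.onnx can be sanity-checked outside PyTorch. A minimal sketch, assuming the onnxruntime and numpy packages are installed; this check is not part of the script above:

# Hypothetical sanity check for the exported model.onnx (assumes `pip install onnxruntime numpy`).
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.randn(4, 1, 28, 28).astype(np.float32)  # batch of 4 works because of dynamic_axes
logits = session.run(None, {"input": dummy})[0]            # "input" matches input_names in the export call
print("predicted digits:", logits.argmax(axis=1))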

# Reference: the official PyTorch MNIST example (pytorch/examples/mnist).
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            if args.dry_run:
                break


def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('../data', train=True, download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")


if __name__ == '__main__':
    main()
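When the example is run with --save-model, it writes mnist_cnn.pt. A minimal reload sketch, assuming it is placed in the same file as the Net class above and that the '../data' MNIST path from the example is reused:

# Hypothetical inference sketch: reload the weights saved by --save-model and classify one test image.
import torch
from torchvision import datasets, transforms

model = Net()
model.load_state_dict(torch.load("mnist_cnn.pt", map_location="cpu"))
model.eval()

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
sample, label = datasets.MNIST('../data', train=False, transform=transform)[0]
with torch.no_grad():
    pred = model(sample.unsqueeze(0)).argmax(dim=1).item()
print("label:", label, "prediction:", pred)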
