• torch.multiprocessing


    https://www.geeksforgeeks.org/multiprocessing-in-python-and-pytorch/
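
    torch.multiprocessing is a drop-in replacement for Python's standard
    multiprocessing module: the API is the same, but tensors sent between
    processes (or marked with share_memory()) are backed by shared memory, so
    all workers read and update the same parameters (Hogwild-style training).
    A minimal sketch of the sharing behavior (my own illustration, not from
    the original post):

    import torch

    t = torch.zeros(3)
    print(t.is_shared())  # False: storage is private to this process
    t.share_memory_()     # move the storage into shared memory, in place
    print(t.is_shared())  # True: child processes now see in-place updates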

    # Import the necessary libraries 
    import torch 
    import torch.nn as nn 
    import torch.multiprocessing as mp 
    
    
    # Define the training function 
    def train(model, X, Y): 
    	# Define the learning rate, number of iterations, and loss function 
    	learning_rate = 0.01
    	n_iters = 100
    	loss = nn.MSELoss() 
    	optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) 
    
    	# Loop through the specified number of iterations 
    	for epoch in range(n_iters): 
    		# Make predictions using the model 
    		y_predicted = model(X) 
    
    		# Calculate the loss 
    		l = loss(y_predicted, Y) 
    
    		# Backpropagate the loss to update the model parameters 
    		l.backward() 
    		optimizer.step() 
    		optimizer.zero_grad() 
    
    		# Print the current loss and weights every 10 epochs 
    		if epoch % 10 == 0: 
    			[w, b] = model.parameters() 
    			print( 
    				f"Rank {mp.current_process().name}: epoch {epoch+1}: w = {w[0][0].item():.3f}, loss = {l:.3f}"
    			) 
    
    
    # Main function 
    if __name__ == "__main__": 
    	# Set the number of processes and define the input and output data 
    	num_processes = 4
    	X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32) 
    	Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32) 
    	n_samples, n_features = X.shape 
    
    	# Print the number of samples and features 
    	print(f"#samples: {n_samples}, #features: {n_features}") 
    
    	# Define the test input and the model input/output sizes 
    	X_test = torch.tensor([5], dtype=torch.float32) 
    	input_size = n_features 
    	output_size = n_features 
    
    	# Define the linear model and print its prediction on the test input before training 
    	model = nn.Linear(input_size, output_size) 
    	print(f"Prediction before training: f(5) = {model(X_test).item():.3f}") 
    
    	# Share the model's memory to allow it to be accessed by multiple processes 
    	model.share_memory() 
    
    	# Create a list of processes and start each process with the train function 
    	processes = [] 
    	for rank in range(num_processes): 
    		p = mp.Process( 
    			target=train, 
    			args=( 
    				model, 
    				X, 
    				Y, 
    			), 
    			name=f"Process-{rank}", 
    		) 
    		p.start() 
    		processes.append(p) 
    		print(f"Started {p.name}") 
    
    	# Wait for all processes to finish 
    	for p in processes: 
    		p.join() 
    		print(f"Finished {p.name}") 
    
    	# Print the model's prediction on the test input after training 
    	print(f"Prediction after training: f(5) = {model(X_test).item():.3f}") 
    
    

    The results are as follows (the original post shows two screenshots of the per-process training output):
    Online posts say this approach can speed up training, but I haven't tested it myself; a rough way to measure it is sketched below.
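
    A minimal timing harness (my own sketch, untested; the 4-sample toy model
    above is far too small to benefit, so this uses a larger synthetic
    problem, and any speedup will depend on core count and tensor sizes):

    import time
    import torch
    import torch.nn as nn
    import torch.multiprocessing as mp

    def work(model, X, Y, n_iters):
    	loss_fn = nn.MSELoss()
    	optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    	for _ in range(n_iters):
    		l = loss_fn(model(X), Y)
    		l.backward()
    		optimizer.step()
    		optimizer.zero_grad()

    if __name__ == "__main__":
    	X = torch.randn(4096, 64)
    	Y = torch.randn(4096, 1)
    	model = nn.Linear(64, 1)
    	model.share_memory()

    	# Baseline: one process runs the full iteration budget.
    	t0 = time.perf_counter()
    	work(model, X, Y, 4000)
    	print(f"1 process : {time.perf_counter() - t0:.2f}s")

    	# Hogwild: four processes split the same budget on shared weights.
    	t0 = time.perf_counter()
    	procs = [mp.Process(target=work, args=(model, X, Y, 1000)) for _ in range(4)]
    	for p in procs:
    		p.start()
    	for p in procs:
    		p.join()
    	print(f"4 processes: {time.perf_counter() - t0:.2f}s")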

  • Original article: https://blog.csdn.net/qq_45759229/article/details/134023397