• Solving the Poisson equation with TensorFlow


    I wrote this code two years ago, when my programming skills were still weak, so it contains a few bugs and the accuracy of the Poisson solve is not great. Interested readers are welcome to modify the code themselves to improve the numerical accuracy; the model problem and the loss being minimized are summarized below for reference.
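
    Both listings solve the same model problem on the unit square; reading it off f, u_accuracy and the loss terms in the code:

    -\Delta u(x,y) = f(x,y) = -2\pi^2\, e^{\pi(x+y)}\sin\!\big(\pi(x+y)\big) \quad \text{in } \Omega=(0,1)^2, \qquad u = 0 \ \text{on } \partial\Omega,

    with exact solution u_{\text{exact}}(x,y) = e^{\pi(x+y)}\sin(\pi x)\sin(\pi y). Both networks are trained by minimizing a boundary-penalized Ritz energy,

    J(u) = \int_\Omega \Big(\tfrac{1}{2}\lvert\nabla u\rvert^2 - f\,u\Big)\,dx\,dy \;+\; \beta\int_{\partial\Omega} u^2\,ds,

    whose integrals are approximated by averages over interior and boundary collocation points (beta is the code's belta = 5e3).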

    code1

    import numpy as np
    import tensorflow as tf
    import matplotlib.pyplot as plt
    import time

    tf.compat.v1.disable_eager_execution()# needed so the placeholder/Session API below works under TensorFlow 2.x
    tic = time.time()
    
    h1 = 0.1
    h2 = 0.05
    M = int(1/h1) + 1
    N = int(1/h2) + 1
    
    x_train = np.linspace(0,1,M)
    y_train = np.linspace(0,1,N)
    
    matr = np.zeros([M,N,2])# store every grid point in matr; matr[i,j] holds the coordinates of grid point (i,j)
    for i in range(M):
        for j in range(N):
            matr[i,j,0] = x_train[i]
            matr[i,j,1] = y_train[j]
    ma = matr.reshape(-1,2)
    print(ma.shape)
    ma_in = matr[1:-1,1:-1].reshape(-1,2)# interior grid points, one row per point
    print(ma_in.shape)
    ma_b = np.concatenate([matr[0],matr[-1],matr[:,0][1:-1],matr[:,-1][1:-1]],0)# boundary grid points
    print(ma_b.shape)
    def f(x,y):# right-hand side f of the PDE
        return - (2*np.pi**2)*np.exp(np.pi*(x + y))*np.sin(np.pi*(x + y))
    def u_accuracy(x,y):# exact solution, used only to measure the error
        return np.exp(np.pi*(x + y))*np.sin(np.pi*x)*np.sin(np.pi*y)
    def u_boundary(x,y):# boundary values (homogeneous Dirichlet)
        return 0*x*y
    
    right_in = f(ma_in[:,0],ma_in[:,1]).reshape(-1,1)
    right_b = u_boundary(ma_b[:,0],ma_b[:,1]).reshape(-1,1)
    print(right_in.shape,right_b.shape)
    
    ma_min = np.array([[0,0]])
    ma_max = np.array([[1,1]])
    
    
    np.random.seed(1234)
    tf.compat.v1.set_random_seed(1234)
    # build the TensorFlow computation graph
    prob_tf = tf.compat.v1.placeholder(tf.float32)
    x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
    x_in_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
    x_b_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
    
    right_in_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])
    right_b_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])
    x_min_tf = tf.compat.v1.placeholder(tf.float32,shape = [1,2])
    x_max_tf = tf.compat.v1.placeholder(tf.float32,shape = [1,2])
    layers = [2,10,10,1]
    
    
    # map the inputs to [-1,1]
    H = 2*(x_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
    H_in = 2*(x_in_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
    H_b = 2*(x_b_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
    # weight and bias initialization
    def w_init(in_dim,out_dim):
        w_std = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev = w_std), dtype=tf.float32)
    
    weights = []
    biases = []
    num_layers = len(layers)
    for i in range(0,num_layers - 1):
        w = w_init(in_dim = layers[i],out_dim = layers[i + 1])
        b = tf.Variable(tf.random.truncated_normal([1,layers[i + 1]], dtype = tf.float32), dtype=tf.float32)
        weights.append(w)
        biases.append(b)
    # forward pass producing the approximate solution
    num_layers = len(weights)# one less than len(layers)
    for i in range(0,num_layers - 1):
        w = weights[i]
        b = biases[i]
        E = tf.eye(tf.shape(w)[0],tf.shape(w)[1])# rectangular "identity" used as a linear shortcut connection
        tf.nn.dropout(w,rate = prob_tf)# note: the return value is discarded, so this line has no effect
        H_in = tf.tanh(tf.add(tf.matmul(H_in,w), b)) + tf.matmul(H_in,E)
        H_b = tf.tanh(tf.add(tf.matmul(H_b,w), b)) + tf.matmul(H_b,E)
        H = tf.tanh(tf.add(tf.matmul(H,w), b)) + tf.matmul(H,E)
    W = weights[-1]
    b = biases[-1]
    u_in = tf.add(tf.matmul(H_in,W),b)
    u_b = tf.add(tf.matmul(H_b,W),b)
    u = tf.add(tf.matmul(H,W),b)
    # define the loss
    u_x = tf.gradients(u_in,x_in_tf)[0][:,0]
    u_y = tf.gradients(u_in,x_in_tf)[0][:,1]
    u_xx = tf.gradients(u_x,x_in_tf)[0][:,0]
    u_yy = tf.gradients(u_y,x_in_tf)[0][:,1]
    
    loss_in = 0.5*tf.reduce_mean(tf.square(u_x) + tf.square(u_y) - 2*u_in*right_in_tf)
    #loss_in = 0.5*tf.reduce_mean((-u_xx - u_yy - 2*right_in_tf)*u_in)
    loss_b = tf.reduce_mean(tf.square(u_b - right_b_tf))
    belta = 5e3
    loss = tf.add(loss_in,belta*loss_b)
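    # Taken together, loss_in + belta*loss_b discretizes the boundary-penalized Ritz energy
    #   J(u) = \int_\Omega ( 0.5*|grad u|^2 - f*u ) dxdy + belta * \int_{\partial\Omega} u^2 ds,
    # whose minimizer (with the penalty enforcing u ≈ 0 on the boundary) solves -Laplace(u) = f;
    # the integrals are replaced by averages over the interior and boundary grid points.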
    # initialization and training
    
    
    
    def plot_curve(data):# visualize the training-loss history
        fig = plt.figure(num = 1,figsize = (4,3),dpi = None,facecolor = None,edgecolor = None,frameon = True)# figure number, size (inches), dpi, face color, edge color, show frame
        plt.plot(range(len(data)),data,color = 'blue')
        plt.legend(['value'],loc = 'upper right')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.show()
    
    lr = 5*1e-3
    optim = tf.compat.v1.train.AdamOptimizer(learning_rate = lr)
    trainer = optim.minimize(loss)
    sess = tf.compat.v1.Session()
    init =  tf.compat.v1.global_variables_initializer()
    sess.run(init)
    train_loss = []
    
    step = []
    error = []
    train_step = 6000
    for i in range(train_step):
        batch = 19
        st = i*batch%len(ma_in)
        end = st+ batch
        feed = {prob_tf:0.5,x_tf:ma,x_in_tf:ma_in[st:end],x_b_tf:ma_b,right_in_tf:right_in[st:end],right_b_tf:right_b,x_min_tf:ma_min,x_max_tf:ma_max}
        
        sess.run(trainer,feed_dict = feed)
        if i%600 == 0:
            print('train_step = {},loss = {}'.format(i,sess.run(loss,feed_dict = feed)))
            train_loss.append(sess.run(loss,feed_dict = feed))
            feed_val = {x_tf:ma,x_in_tf:ma_in,x_b_tf:ma_b,right_in_tf:right_in,right_b_tf:right_b,x_min_tf:ma_min,x_max_tf:ma_max}
            u_pred = sess.run(u,feed_dict = feed_val).reshape(M,N)
            x,y = np.meshgrid(x_train,y_train)
            error_square = ((u_pred - u_accuracy(x,y).T)**2).sum()/(u_accuracy(x,y)**2 + 1e-7).sum()
            error_step = np.sqrt(error_square)
            error.append(error_step)
            step.append(i)
            print(error_step)
            
            
    toc = time.time()
    plot_curve(train_loss)
    print(toc - tic)
    
    feed = {prob_tf:0.5,x_tf:ma,x_in_tf:ma_in,x_b_tf:ma_b,right_in_tf:right_in,right_b_tf:right_b,x_min_tf:ma_min,x_max_tf:ma_max}
    u_pred = sess.run(u,feed_dict = feed).reshape(M,N)
    print(type(u_pred),u_pred.shape)
    x,y = np.meshgrid(x_train,y_train)
    print(type(u_accuracy(x,y)),u_accuracy(x,y).shape)
    error_square = ((u_pred - u_accuracy(x,y).T)**2).sum()/(u_accuracy(x,y)**2 + 1e-7).sum()
    error = np.sqrt(error_square)
    print(error)
    plt.subplot(2,1,1)
    plt.contourf(x,y,u_accuracy(x,y),40,cmap = 'Blues')
    plt.colorbar()
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('exact solution')
    plt.subplot(2,1,2)
    plt.contourf(x,y,u_pred.T,40,cmap = 'Blues')
    plt.colorbar()
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('numerical solution')
    plt.tight_layout()
    plt.show()
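
    As a quick sanity check on the manufactured solution used above, the short sketch below (not part of the original script; it assumes f and u_accuracy from code1 are already defined in the session) verifies numerically that -Δu_exact = f using central differences:

    import numpy as np
    # assumes f(x,y) and u_accuracy(x,y) from code1 are in scope
    h = 1e-3
    rng = np.random.default_rng(0)
    xs = 0.05 + 0.9*rng.random(1000)# interior sample points, kept away from the boundary
    ys = 0.05 + 0.9*rng.random(1000)
    # five-point central-difference approximation of the Laplacian of the exact solution
    lap = (u_accuracy(xs + h, ys) + u_accuracy(xs - h, ys)
           + u_accuracy(xs, ys + h) + u_accuracy(xs, ys - h)
           - 4*u_accuracy(xs, ys))/h**2
    rel_residual = np.abs(-lap - f(xs, ys)).max()/np.abs(f(xs, ys)).max()
    print('relative residual of -Laplace(u_exact) - f:', rel_residual)# should be tiny (finite-difference truncation error)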
    

    code2

    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt
    import time

    tf.compat.v1.disable_eager_execution()# needed so the placeholder/Session API below works under TensorFlow 2.x
    np.random.seed(1234)
    tf.compat.v1.set_random_seed(1234)
    def f(x,y):# right-hand side f of the PDE
        return - (2*np.pi**2)*np.exp(np.pi*(x + y))*np.sin(np.pi*(x + y))
    def u_accuracy(x,y):# exact solution, used only to measure the error
        return np.exp(np.pi*(x + y))*np.sin(np.pi*x)*np.sin(np.pi*y)
    def u_boundary(x,y):# boundary values (homogeneous Dirichlet)
        return 0*x*y
    # interior points
    class INSET():# training set: interior collocation points (cell midpoints)
        def __init__(self):
            self.dim = 2
            self.xa,self.xb,self.ya,self.yb = 0,1,0,1
            self.area = (self.xb - self.xa)*(self.yb - self.ya)
            self.nx,self.ny = 20,30
            self.hx = (self.xb - self.xa)/self.nx
            self.hy = (self.yb - self.ya)/self.ny
            self.size = self.nx*self.ny
            self.X = np.zeros([self.size,self.dim])
            for i in range(self.nx):
                for j in range(self.ny):
                    self.X[i*self.ny + j,0] = self.xa + (i + 0.5)*self.hx
                    self.X[i*self.ny + j,1] = self.ya + (j + 0.5)*self.hy
            self.u_acc = u_accuracy(self.X[:,0],self.X[:,1]).reshape(-1,1)# exact solution at the interior points
            self.right = f(self.X[:,0],self.X[:,1]).reshape(-1,1)# right-hand side at the interior points
    # boundary points
    class BDSET():# training set: boundary collocation points
        def __init__(self):
            self.dim = 2
            self.xa,self.xb,self.ya,self.yb = 0,1,0,1
            self.area = (self.xb - self.xa)*(self.yb - self.ya)
            self.length = 2*((self.xb - self.xa) + (self.yb - self.ya))
            self.nx,self.ny = 20,30
            self.hx = (self.xb - self.xa)/self.nx
            self.hy = (self.yb - self.ya)/self.ny
            self.size = 2*(self.nx + self.ny)
            self.X = np.zeros([self.size,self.dim])
            for i in range(self.nx):# bottom edge (y = ya) and top edge (y = yb)
                self.X[i,0] = self.xa + (i + 0.5)*self.hx
                self.X[i,1] = self.ya
                self.X[self.nx + self.ny + i,0] = self.xb - self.xa - (i + 0.5)*self.hx
                self.X[self.nx + self.ny + i,1] = self.yb
            for j in range(self.ny):# right edge (x = xb) and left edge (x = xa)
                self.X[self.nx + j,0] = self.xb
                self.X[self.nx + j,1] = self.ya + (j + 0.5)*self.hy
                self.X[2*self.nx + self.ny + j,0] = self.xa
                self.X[2*self.nx + self.ny + j,1] = self.yb - self.ya - (j + 0.5)*self.hy
            self.u_acc = u_boundary(self.X[:,0],self.X[:,1]).reshape(-1,1)# exact values at the boundary points
    # test set
    class TESET():# test grid used to evaluate the trained network
        def __init__(self):
            self.dim = 2
            self.xa,self.xb,self.ya,self.yb = 0,1,0,1
            self.hx = 0.1
            self.hy = 0.05
            self.nx = int((self.xb - self.xa)/self.hx) + 1
            self.ny = int((self.yb - self.ya)/self.hy) + 1# note hy here, so the grid covers the full y-range
            self.size = self.nx*self.ny
            self.X = np.zeros([self.size,self.dim])
            for j in range(self.ny):
                for i in range(self.nx):
                    self.X[j*self.nx + i,0] = self.xa + i*self.hx
                    self.X[j*self.nx + i,1] = self.ya + j*self.hy
            self.u_acc = u_accuracy(self.X[:,0],self.X[:,1]).reshape(-1,1)# exact solution on the test grid
    
    class NN:
        def __init__(self,inset,bdset,layers,belta):
            self.inset = inset# training set: interior points
            self.bdset = bdset# training set: boundary points
            self.datamin = np.array([[inset.xa,inset.ya]])
            self.datamax = np.array([[inset.xb,inset.yb]])

            self.layers = layers# network architecture

            self.inset_x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])# interior points
            self.bdset_x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])# boundary points
            self.right_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])# right-hand side at the interior points
            self.ub_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])# boundary values
            self.min = tf.compat.v1.placeholder(tf.float32,shape = [None,2])# lower bounds of the two coordinates
            self.max = tf.compat.v1.placeholder(tf.float32,shape = [None,2])# upper bounds of the two coordinates
            self.feed = {self.inset_x_tf:inset.X,self.bdset_x_tf:bdset.X,
                         self.right_tf:inset.right,self.ub_tf:bdset.u_acc,
                         self.min:self.datamin,self.max:self.datamax}# default feed dictionary
            self.Hin = 2*(self.inset_x_tf - self.min)/(self.max - self.min) - 1# normalize inputs to [-1,1]
            self.Hbd = 2*(self.bdset_x_tf - self.min)/(self.max - self.min) - 1# normalize inputs to [-1,1]
            self.weights,self.biases = self.NNinit()# create the weight and bias variables
            self.u_in = self.NNU(self.Hin)# network output at the interior training points
            self.u_b = self.NNU(self.Hbd)# network output at the boundary training points

            self.ux,self.uy = self.u_grad(self.inset_x_tf)# first-order derivatives at the interior points
            self.loss_in = tf.reduce_mean(tf.square(self.ux) + tf.square(self.uy)) -\
            tf.reduce_mean(2*self.u_in*self.right_tf)# interior loss from the Ritz (minimum potential energy) principle
            self.loss_b = tf.reduce_mean(tf.square(self.u_b - self.ub_tf))# boundary loss
            self.loss = self.loss_in + belta*self.loss_b# total loss
            
            self.optim = tf.compat.v1.train.AdamOptimizer(learning_rate = 5e-3)# Adam optimizer
            self.minimizer = self.optim.minimize(self.loss)
            '''
            config=tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                           log_device_placement=True)
            self.sess = tf.compat.v1.Session(config)
            '''
            self.sess = tf.compat.v1.Session()# create the session
            init =  tf.compat.v1.global_variables_initializer()# initialize all variables
            self.sess.run(init)
            
        def w_init(self,in_dim,out_dim):# Xavier-style weight initialization
            w_std = np.sqrt(2/(in_dim + out_dim))
            return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev = w_std), dtype=tf.float32)
        def NNinit(self):# create the weight and bias variables
            weights = []
            biases = []
            num = len(self.layers)
            for i in range(0,num - 1):
                w = self.w_init(in_dim = self.layers[i],out_dim = self.layers[i + 1])
                b = tf.Variable(tf.random.truncated_normal([1,self.layers[i + 1]], dtype = tf.float32), dtype=tf.float32)
                weights.append(w)
                biases.append(b)
            return weights,biases
        def NNU(self,Input):# forward pass; reuses self.weights/self.biases instead of creating new variables on every call
            Input = tf.cast(Input,tf.float32)
            weights,biases = self.weights,self.biases
            num = len(weights)
            for i in range(0,num - 1):
                w,b = weights[i],biases[i]
                Input = tf.tanh(tf.add(tf.matmul(Input,w),b))
            W,B = weights[-1],biases[-1]
            return tf.add(tf.matmul(Input,W),B)
        def u_grad(self,Input):# first-order derivatives of the network output w.r.t. the raw coordinates
            Input = tf.cast(Input,tf.float32)
            H = 2*(Input - self.min)/(self.max - self.min) - 1# same normalization as in __init__
            u = self.NNU(H)
            ux = tf.gradients(u,Input)[0][:,0:1]
            uy = tf.gradients(u,Input)[0][:,1:2]
            return ux,uy
        def ERROR(self):# relative error between network output and exact solution on the training set
            fenzi_in = tf.reduce_mean(tf.square(self.u_in - self.inset.u_acc))
            fenzi_b = tf.reduce_mean(tf.square(self.u_b - self.bdset.u_acc))
            fenmu_in = tf.reduce_mean(tf.square(self.u_in))
            fenmu_b = tf.reduce_mean(tf.square(self.u_b))
            fenzi = fenzi_in + fenzi_b
            fenmu = fenmu_in + fenmu_b
            return tf.sqrt(fenzi/fenmu)
        
        def train(self,step):# training loop (full batch)
            print('train the neural network')
            st_time = time.time()
            error_op = self.ERROR()# build the error op once, outside the loop
            LOSS = self.sess.run(self.loss,feed_dict = self.feed)
            loss_best = LOSS
            weights_best = self.sess.run(self.weights)
            biases_best = self.sess.run(self.biases)
            record = 400
            for j in range(step):
                self.sess.run(self.minimizer,feed_dict = self.feed)# one optimization step
                if j%record == 0:
                    error = self.sess.run(error_op,feed_dict = self.feed)
                    LOSS = self.sess.run(self.loss,feed_dict = self.feed)
                    print('train step:%d,loss:%.2f,error:%.2f'
                          %(j,LOSS,error))# print the loss and the training error
                    
                    if LOSS < loss_best:
                        loss_best = LOSS
                        weights_best = self.sess.run(self.weights)# keep the best weights seen so far
                        biases_best = self.sess.run(self.biases)# keep the best biases seen so far
                    
            epo_time = time.time() - st_time
            print('one epoch used:%.2f'%(epo_time))
            print('------------------------------------------------')
           
        def trainbatch(self,step):# mini-batch training, intended to improve accuracy and reduce time
            print('train the neural network')
            st_time = time.time()
            batch = self.inset.nx
            error_op = self.ERROR()# build the error op once, outside the loop
            LOSS = self.sess.run(self.loss,feed_dict = self.feed)
            loss_best = LOSS
            weights_best = self.sess.run(self.weights)
            biases_best = self.sess.run(self.biases)
            record = 100
            for j in range(step):
                x = j*batch%len(self.inset.X)# sliding window over the interior points
                y = x + batch
                feed = {self.inset_x_tf:self.inset.X[x:y],self.bdset_x_tf:self.bdset.X,
                        self.right_tf:self.inset.right[x:y],self.ub_tf:self.bdset.u_acc,
                        self.min:self.datamin,self.max:self.datamax}
                self.sess.run(self.minimizer,feed_dict = feed)
                if j%record == 0:
                    error = self.sess.run(error_op,feed_dict = self.feed)
                    LOSS = self.sess.run(self.loss,feed_dict = self.feed)
                    print('train step:%d,loss:%.2f,error:%.2f'
                          %(j,LOSS,error))
                    '''
                    if LOSS < loss_best:
                        loss_best = LOSS
                        weights_best = self.sess.run(self.weights)
                        biases_best = self.sess.run(self.biases)
                    '''
            epo_time = time.time() - st_time
            print('one epoch used:%.2f'%(epo_time))
            print('------------------------------------------------')
           
    inset = INSET()
    bdset = BDSET()
    teset = TESET()
    layers = [2,20,10,1]
    belta = 5e3
    #datamin = np.array([[teset.xa,teset.ya]])
    #datamax = np.array([[teset.xb,teset.yb]])
    ne = NN(inset,bdset,layers,belta)# instantiate the solver
    epochs = 3
    st = time.time()
    for i in range(epochs):# training loop
        print('epochs:%d'%(i))
        #ne.trainbatch(500)
        ne.train(2000)
    ed = time.time()
    print('all used time:%.2f'%(ed - st))
    
    # predict on the test set and measure the error
    feed = {ne.inset_x_tf:teset.X,ne.min:ne.datamin,ne.max:ne.datamax}
    u_pred = ne.sess.run(ne.u_in,feed_dict = feed)# evaluate the network (u_in) on the test points
    u_acc = np.array(teset.u_acc,dtype = np.float32)# exact solution on the test set
    def LERROR(u_pred,u_acc):# relative L2 error
        fenzi = np.square(u_pred - u_acc).sum()
        fenmu = (np.square(u_acc) + 1e-7).sum()
        return np.sqrt(fenzi/fenmu)
    
    print('the test error:%.3f'%(LERROR(u_pred,u_acc)))# print the test error
    
    M = teset.nx
    N = teset.ny
    
    x_train = np.linspace(teset.xa,teset.xb,M)
    y_train = np.linspace(teset.ya,teset.yb,N)
    
    x,y = np.meshgrid(x_train,y_train)
    # teset.X is stored y-major (index j*nx + i), so reshape to (N,M) to match meshgrid
    plt.contourf(x,y,u_pred.reshape(N,M),40,cmap = 'Blues')# plot the numerical solution
    plt.colorbar()
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('numerical solution')
    plt.show()
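
    As an optional follow-up (not in the original post), the sketch below plots where the prediction error concentrates on the test grid; it reuses teset, u_pred and u_acc from the script above:

    import numpy as np
    import matplotlib.pyplot as plt
    
    M,N = teset.nx,teset.ny
    x,y = np.meshgrid(np.linspace(teset.xa,teset.xb,M),np.linspace(teset.ya,teset.yb,N))
    err = np.abs(u_pred - u_acc).reshape(N,M)# test points are stored y-major, so reshape to (N,M)
    plt.figure()
    plt.contourf(x,y,err,40,cmap = 'Reds')
    plt.colorbar()
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('pointwise absolute error')
    plt.show()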
    
  • Original article: https://blog.csdn.net/forrestguang/article/details/125477179