• PyG-GCN-Cora(在Cora数据集上应用GCN做节点分类)


    model.py

    import torch.nn as nn
    from torch_geometric.nn import GCNConv
    import torch.nn.functional as F
    class gcn_cls(nn.Module):
        """Two-layer GCN encoder with a linear classification head.

        Produces raw per-node class logits (no softmax), suitable for
        nn.CrossEntropyLoss.

        Args:
            in_dim: dimensionality of the input node features.
            hid_dim: hidden width of both graph-convolution layers.
            out_dim: number of output classes.
            dropout_size: dropout probability applied after the first
                convolution (active only in training mode).
        """

        def __init__(self, in_dim, hid_dim, out_dim, dropout_size=0.5):
            super().__init__()
            self.conv1 = GCNConv(in_dim, hid_dim)
            self.conv2 = GCNConv(hid_dim, hid_dim)
            self.fc = nn.Linear(hid_dim, out_dim)
            self.relu = nn.ReLU()
            self.dropout_size = dropout_size

        def forward(self, x, edge_index):
            """Return (num_nodes, out_dim) logits for node features *x* and graph *edge_index*."""
            # conv1 -> dropout -> ReLU, exactly as in the original pipeline.
            hidden = self.conv1(x, edge_index)
            hidden = F.dropout(hidden, p=self.dropout_size, training=self.training)
            hidden = self.relu(hidden)
            # Second message-passing layer, then the linear classifier.
            hidden = self.relu(self.conv2(hidden, edge_index))
            return self.fc(hidden)
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19

    main.py

    import torch
    import torch.nn as nn
    from torch_geometric.datasets import Planetoid
    from model import gcn_cls
    import torch.optim as optim
    # Load the Cora citation graph (downloaded on first run).
    dataset = Planetoid(root='./data/Cora', name='Cora')
    print(dataset[0])
    cora_data = dataset[0]

    # ---- Hyper-parameters ----
    epochs = 50
    lr = 1e-3
    weight_decay = 5e-3
    momentum = 0.5                      # only relevant if switching to the SGD optimizer below
    hidden_dim = 128
    output_dim = dataset.num_classes    # 7 for Cora; derived from the dataset instead of hard-coded

    net = gcn_cls(cora_data.x.shape[1], hidden_dim, output_dim)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=weight_decay)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
    criterion = nn.CrossEntropyLoss()

    print("****************Begin Training****************")
    net.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        out = net(cora_data.x, cora_data.edge_index)
        loss_train = criterion(out[cora_data.train_mask], cora_data.y[cora_data.train_mask])
        loss_train.backward()
        optimizer.step()
        # Validation loss needs no gradients; computing it outside the graph
        # avoids keeping extra autograd state alive. NOTE(review): it is still
        # computed from the training-mode forward pass (dropout active).
        with torch.no_grad():
            loss_val = criterion(out[cora_data.val_mask], cora_data.y[cora_data.val_mask])
        print('epoch', epoch + 1, 'loss-train {:.2f}'.format(loss_train), 'loss-val {:.2f}'.format(loss_val))

    # ---- Evaluation on the held-out test nodes ----
    print("****************Begin Testing****************")
    net.eval()
    with torch.no_grad():  # inference only: no autograd graph needed
        out = net(cora_data.x, cora_data.edge_index)
        loss_test = criterion(out[cora_data.test_mask], cora_data.y[cora_data.test_mask])
        pred = out.argmax(dim=1)
        pred_label = pred[cora_data.test_mask]
        true_label = cora_data.y[cora_data.test_mask]
        # Tensor ops instead of Python's builtin sum(): one vectorized pass.
        acc = (pred_label == true_label).float().mean()
    print('loss-test {:.2f}'.format(loss_test), 'acc {:.2f}'.format(acc))
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40

    参数设置

    epochs = 50
    lr = 1e-3
    weight_decay = 5e-3
    momentum = 0.5
    hidden_dim = 128
    output_dim = 7
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6

    output_dim是输出维度,也就是有多少可能的类别。

    注意事项

    1.发现loss不下降
    建议改一改lr(学习率),我做的时候开始用的SGD,学习率设的0.01发现loss不下降,改成0.1后好了很多。如果用AdamW,0.001(1e-3)基本就够用了

    运行图

    在这里插入图片描述

  • 相关阅读:
    【从零开始一步步学习VSOA开发】同步RPC客户端
    FileInputStream文件字节输入流
    VC/C++ Intel x86 内联汇编实现 “Interlocked” 原子变量各种操作
    172.阶乘后的零 | 793.阶乘函数后k个零
    android FM DAB相关的知识
    DStream转换介绍_大数据培训
    「USACO 做题笔记」USACO 2011 Nov Bronze
    DDD与微服务的千丝万缕
    IDEA的常用设置
    软件License授权原理
  • 原文地址:https://blog.csdn.net/m0_59741202/article/details/133048022