Snippets Collections
# Training / evaluation loop; uses the model, dataset and dataloaders
# defined in the snippets below
h_feats = 64
learn_iterations = 50
learn_rate = 0.01

model = EntityGraphModule(
    dataset.graphs[0].ndata["feat"].shape[1],   # node feature size
    dataset.graphs[0].edata["feat"].shape[1],   # edge feature size
    h_feats,
    dataset.labels.max().item() + 1             # number of classes
)
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)

for _ in range(learn_iterations):
    for batched_graph, labels in train_dataloader:
        pred = model(batched_graph, batched_graph.ndata["feat"].float(), batched_graph.edata["feat"].float())
        loss = F.cross_entropy(pred, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Accuracy on the held-out test split
num_correct = 0
num_tests = 0
for batched_graph, labels in test_dataloader:
    pred = model(batched_graph, batched_graph.ndata["feat"].float(), batched_graph.edata["feat"].float())
    num_correct += (pred.argmax(1) == labels).sum().item()
    num_tests += len(labels)

acc = num_correct / num_tests
print("Test accuracy:", acc)
from torch.utils.data.sampler import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader

# 80/20 train/test split over the dataset indices
num_examples = len(dataset)
num_train = int(num_examples * 0.8)

train_sampler = SubsetRandomSampler(torch.arange(num_train))
test_sampler = SubsetRandomSampler(torch.arange(num_train, num_examples))

train_dataloader = GraphDataLoader(
    dataset, sampler=train_sampler, batch_size=5, drop_last=False
)
test_dataloader = GraphDataLoader(
    dataset, sampler=test_sampler, batch_size=5, drop_last=False
)
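GraphDataLoader merges each mini-batch into a single batched DGL graph, so the model sees one graph per training step. A quick sanity check, assuming the loaders above, might look like:

batched_graph, labels = next(iter(train_dataloader))
print(batched_graph.batch_size)   # number of graphs in this batch (up to 5 here)
print(batched_graph.num_nodes())  # total nodes across all graphs in the batch
print(labels)                     # one label per graph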
import dgl
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import NNConv, SAGEConv

class EntityGraphModule(nn.Module):
    def __init__(self, node_in_feats, edge_in_feats, h_feats, num_classes):
        super(EntityGraphModule, self).__init__()
        # NNConv maps each edge's features to a (node_in_feats x h_feats)
        # weight matrix. Passing the nn.Linear module directly (instead of
        # wrapping it in a lambda) registers its parameters with the model,
        # so the optimizer actually trains them.
        edge_func = nn.Linear(edge_in_feats, node_in_feats * h_feats)
        self.conv1 = NNConv(node_in_feats, h_feats, edge_func)
        self.conv2 = SAGEConv(h_feats, num_classes, "pool")

    def forward(self, g, node_features, edge_features):
        h = self.conv1(g, node_features, edge_features)
        h = F.relu(h)
        h = self.conv2(g, h)
        # Average the per-node scores into one prediction per graph
        g.ndata["h"] = h
        return dgl.mean_nodes(g, "h")
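A quick smoke test of the module on a single graph (assuming the EntitiesDataset defined in the next snippet, and num_classes=2 for the binary fraud label):

g, label = dataset[0]
model = EntityGraphModule(g.ndata["feat"].shape[1], g.edata["feat"].shape[1], 64, 2)
logits = model(g, g.ndata["feat"].float(), g.edata["feat"].float())
print(logits.shape)  # torch.Size([1, 2]): one prediction for the single graph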
import os

os.environ["DGLBACKEND"] = "pytorch"
import pandas as pd
import torch
import dgl
from dgl.data import DGLDataset

class EntitiesDataset(DGLDataset):
    def __init__(self, entitiesFile):
        self.entitiesFile = entitiesFile
        super().__init__(name="entities")

    def process(self):
        # One JSON object per line (JSON Lines format)
        entities = pd.read_json(self.entitiesFile, lines=True)

        self.graphs = []
        self.labels = []

        for _, entity in entities.iterrows():
            # Collect edge endpoints and the two edge features R1/R2
            a = []
            b = []
            r1_feat = []
            r2_feat = []
            for edge in entity["edges"]:
                a.append(edge["a"])
                b.append(edge["b"])
                r1_feat.append(edge["R1"])
                r2_feat.append(edge["R2"])
            a = torch.LongTensor(a)
            b = torch.LongTensor(b)
            edge_features = torch.LongTensor([r1_feat, r2_feat]).t()

            # Two features per node: total value and item count
            node_feat = [[node["totalValue"], node["items"]] for node in entity["records"]]
            node_features = torch.tensor(node_feat)

            g = dgl.graph((a, b), num_nodes=len(entity["records"]))
            g.edata["feat"] = edge_features
            g.ndata["feat"] = node_features
            # Self-loops keep isolated nodes involved in message passing;
            # DGL zero-fills edge features for the added loops
            g = dgl.add_self_loop(g)

            self.graphs.append(g)
            self.labels.append(entity["fraud"])

        self.labels = torch.LongTensor(self.labels)

    def __getitem__(self, i):
        return self.graphs[i], self.labels[i]

    def __len__(self):
        return len(self.graphs)

dataset = EntitiesDataset("./entities.jsonl")
print(dataset)
print(dataset[0])
{
  "fraud":1,
  "records":[
    {
      "id":0,
      "totalValue":85,
      "items":5
    }
  ],
  "edges":[
    
  ]
}
{
  "fraud":1,
  "records":[
    {
      "id":0,
      "totalValue":85,
      "items":2
    },
    {
      "id":1,
      "totalValue":31,
      "items":4
    },
    {
      "id":2,
      "totalValue":20,
      "items":9
    }
  ],
  "edges":[
    {
      "a":1,
      "b":0,
      "R1":1,
      "R2":1
    },
    {
      "a":2,
      "b":1,
      "R1":0,
      "R2":1
    }
  ]
}
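The two JSON objects above are example lines of entities.jsonl: a one-record entity with no edges, and a three-record entity with two edges. A minimal sketch for writing such a file (field names taken from the examples) could be:

import json

entities = [
    {"fraud": 1,
     "records": [{"id": 0, "totalValue": 85, "items": 5}],
     "edges": []},
]

with open("entities.jsonl", "w") as f:
    for entity in entities:
        f.write(json.dumps(entity) + "\n")  # one JSON object per line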
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from torch_geometric.data import InMemoryDataset, Data
from tqdm import tqdm

class classDataset(InMemoryDataset):
    def __init__(self, root, transform=None, pre_transform=None):
        super(classDataset, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        return []

    @property
    def processed_file_names(self):
        return ['./train_vec.csv']

    def download(self):
        pass

    def process(self):
        data_list = []
        # Group the preprocessed data by label_vec: each email in a label
        # category becomes a node, so all emails sharing a label form one graph.
        # `df` is the preprocessed DataFrame and is assumed to exist already.
        grouped = df.groupby('label_vec')
        for label_vec, group in tqdm(grouped):
            # Re-encode text_vec within each group so node indices start at 0
            label_email_id = LabelEncoder().fit_transform(group.text_vec)
            group = group.reset_index(drop=True)
            group['label_email_id'] = label_email_id
            node_features = group.loc[group.label_vec == label_vec,
                                      ['label_email_id', 'text_vec']] \
                .sort_values('label_email_id').text_vec.drop_duplicates().values

            node_features = torch.LongTensor(node_features).unsqueeze(1)
            # Chain consecutive emails of the group into a path graph
            target_nodes = group.label_email_id.values[1:]
            source_nodes = group.label_email_id.values[:-1]

            edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)
            x = node_features
            y = torch.tensor([group.label_vec.values[0]], dtype=torch.long)

            data = Data(x=x, edge_index=edge_index, y=y)
            data_list.append(data)

        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
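A hedged usage sketch for the class above (the root path is a placeholder, and df must already hold the preprocessed emails; in PyG versions before 2.0, DataLoader lives in torch_geometric.data rather than torch_geometric.loader):

from torch_geometric.loader import DataLoader

dataset = classDataset(root='data/')  # runs process() if the processed file is missing
loader = DataLoader(dataset, batch_size=32, shuffle=True)

for batch in loader:
    print(batch.num_graphs, batch.x.shape, batch.edge_index.shape, batch.y.shape)
    break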
import os
import numpy as np
import pandas as pd
import torch
from torch_geometric.data import Dataset, Data
from tqdm import tqdm

class MyOwnDataset(Dataset):
    def __init__(self, root, test=False, transform=None, pre_transform=None):
        '''root: where the dataset should be stored; this folder is split into
        raw_dir (downloaded dataset) and processed_dir (processed data).
        test: switches between the train and test file naming schemes
        (the original snippet read self.test without defining it).'''
        self.test = test
        super(MyOwnDataset, self).__init__(root, transform, pre_transform)

    @property
    def raw_file_names(self):
        '''If this file exists in raw_dir, the download is not triggered
        (the download func is not implemented here).'''
        return 'cate_id_01.csv'

    @property
    def processed_file_names(self):
        '''If these files are found in processed_dir, processing is skipped.'''
        return 'not_implemented.pt'

    def download(self):
        # Download to `self.raw_dir`.
        # path = download_url(url, self.raw_dir)
        pass

    def process(self):
        self.data = pd.read_csv(self.raw_paths[0])
        for index, mol in tqdm(self.data.iterrows(), total=self.data.shape[0]):
            cate = mol["categories"]
            categories_main = mol["categories_main"]
            # Get node features
            node_feats = self._get_node_features(cate)
            # Get edge features
            edge_feats = self._get_edge_features(cate)
            # Get adjacency info
            edge_index = self._get_adjacency_info(cate)
            # Get label info
            label = self._get_labels(categories_main)

            # Create data object and save one file per graph
            data = Data(x=node_feats,
                        edge_index=edge_index,
                        edge_attr=edge_feats,
                        y=label,
                        smiles=mol["smiles"]
                        )
            if self.test:
                torch.save(data,
                           os.path.join(self.processed_dir,
                                        f'data_test_{index}.pt'))
            else:
                torch.save(data,
                           os.path.join(self.processed_dir,
                                        f'data_{index}.pt'))
    def _get_node_features(self, mol):
        """ 
        This will return a matrix / 2d array of the shape
        [Number of Nodes, Node Feature size]
        """
        all_node_feats = []

        for atom in mol.GetAtoms():
            node_feats = []
            # Feature 1: Atomic number        
            node_feats.append(atom.GetAtomicNum())
            # Feature 2: Atom degree
            node_feats.append(atom.GetDegree())
            # Feature 3: Formal charge
            node_feats.append(atom.GetFormalCharge())
            # Feature 4: Hybridization
            node_feats.append(atom.GetHybridization())
            # Feature 5: Aromaticity
            node_feats.append(atom.GetIsAromatic())
            # Feature 6: Total Num Hs
            node_feats.append(atom.GetTotalNumHs())
            # Feature 7: Radical Electrons
            node_feats.append(atom.GetNumRadicalElectrons())
            # Feature 8: In Ring
            node_feats.append(atom.IsInRing())
            # Feature 9: Chirality
            node_feats.append(atom.GetChiralTag())

            # Append node features to matrix
            all_node_feats.append(node_feats)

        all_node_feats = np.asarray(all_node_feats)
        return torch.tensor(all_node_feats, dtype=torch.float)

    def _get_edge_features(self, mol):
        """ 
        This will return a matrix / 2d array of the shape
        [Number of edges, Edge Feature size]
        """
        all_edge_feats = []

        for bond in mol.GetBonds():
            edge_feats = []
            # Feature 1: Bond type (as double)
            edge_feats.append(bond.GetBondTypeAsDouble())
            # Feature 2: Rings
            edge_feats.append(bond.IsInRing())
            # Append edge features to matrix (twice, once per direction)
            all_edge_feats += [edge_feats, edge_feats]

        all_edge_feats = np.asarray(all_edge_feats)
        return torch.tensor(all_edge_feats, dtype=torch.float)

    def _get_adjacency_info(self, mol):
        """
        We could also use rdmolops.GetAdjacencyMatrix(mol)
        but we want to be sure that the order of the indices
        matches the order of the edge features
        """
        edge_indices = []
        for bond in mol.GetBonds():
            i = bond.GetBeginAtomIdx()
            j = bond.GetEndAtomIdx()
            edge_indices += [[i, j], [j, i]]

        edge_indices = torch.tensor(edge_indices)
        edge_indices = edge_indices.t().to(torch.long).view(2, -1)
        return edge_indices

    def _get_labels(self, label):
        label = np.asarray([label])
        return torch.tensor(label, dtype=torch.int64)

    def len(self):
        return self.data.shape[0]

    def get(self, idx):
        """ - Equivalent to __getitem__ in pytorch
            - Is not needed for PyG's InMemoryDataset
        """
        if self.test:
            data = torch.load(os.path.join(self.processed_dir, 
                                 f'data_test_{idx}.pt'))
        else:
            data = torch.load(os.path.join(self.processed_dir, 
                                 f'data_{idx}.pt'))   
        return data
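Note that the _get_* helpers call RDKit's GetAtoms()/GetBonds(), so the categories column is assumed to hold (or be parsed into) RDKit Mol objects, e.g. via Chem.MolFromSmiles. A hedged usage sketch with a placeholder root path:

from torch_geometric.loader import DataLoader  # torch_geometric.data in older PyG

dataset = MyOwnDataset(root='data/')              # runs process() on first use
test_dataset = MyOwnDataset(root='data/', test=True)

loader = DataLoader(dataset, batch_size=16, shuffle=True)
batch = next(iter(loader))
print(batch.num_graphs, batch.x.shape, batch.edge_attr.shape)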
# Load entire dataset
X, y = torch.load('some_training_set_with_labels.pt')
batch_size = len(X) // n_batches  # assuming n_batches evenly divides the data

# Train model
for epoch in range(max_epochs):
    for i in range(n_batches):
        # Local batches and labels
        local_X, local_y = X[i*batch_size:(i+1)*batch_size], y[i*batch_size:(i+1)*batch_size]
 
        # Your model
        [...]
         
         
# other
# Unoptimized generator
training_generator = SomeSingleCoreGenerator('some_training_set_with_labels.pt')
 
# Train model
for epoch in range(max_epochs):
    for local_X, local_y in training_generator:
        # Your model
        [...]
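The Stanford guide these two data-loading snippets come from (source link below) replaces the single-core generator with torch.utils.data, which batches and loads in parallel worker processes. A minimal sketch of that version, with placeholder class and path names:

import torch
from torch.utils.data import Dataset, DataLoader

class SomeDataset(Dataset):
    """Placeholder Dataset wrapping the tensors loaded above."""
    def __init__(self, path):
        self.X, self.y = torch.load(path)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

training_set = SomeDataset('some_training_set_with_labels.pt')
# num_workers > 0 loads batches in parallel worker processes
training_generator = DataLoader(training_set, batch_size=64,
                                shuffle=True, num_workers=4)

max_epochs = 100
for epoch in range(max_epochs):
    for local_X, local_y in training_generator:
        pass  # your model goes here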
Tags: #entityresolution #frauddetection #gnn #machinelearning #dataset #pytorch #loaddata #trainset

Source of the two data-loading snippets: https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel