Commit a9036aaa authored by Mohamed Yacine Touahria Miliani

Merge branch 'master' into 'main'

Final build

See merge request !1
parents 204696ea b5c2a6ea
Showing changed files with 492 additions and 0 deletions
File added
File added
import math

import torch
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn import init

from HVGAE_AD.model.PVAE.manifolds import PoincareBall, Euclidean
from geoopt import ManifoldParameter


class RiemannianLayer(nn.Module):
    """Base class for layers whose bias lives on a Riemannian manifold."""

    def __init__(self, in_features, out_features, manifold, over_param, weight_norm):
        super(RiemannianLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.manifold = manifold

        self._weight = Parameter(torch.Tensor(out_features, in_features))
        self.over_param = over_param
        self.weight_norm = weight_norm
        if self.over_param:
            # Over-parameterised: the bias is a full point per hyperplane, optimised on the manifold.
            self._bias = ManifoldParameter(torch.Tensor(out_features, in_features), manifold=manifold)
        else:
            self._bias = Parameter(torch.Tensor(out_features, 1))
        self.reset_parameters()

    @property
    def weight(self):
        return self.manifold.transp0(self.bias, self._weight)  # weight \in T_0 => weight \in T_bias

    @property
    def bias(self):
        if self.over_param:
            return self._bias
        else:
            return self.manifold.expmap0(self._weight * self._bias)  # reparameterisation of a point on the manifold

    def reset_parameters(self):
        init.kaiming_normal_(self._weight, a=math.sqrt(5))
        fan_in, _ = init._calculate_fan_in_and_fan_out(self._weight)
        bound = 4 / math.sqrt(fan_in)
        init.uniform_(self._bias, -bound, bound)
        if self.over_param:
            with torch.no_grad():
                self._bias.set_(self.manifold.expmap0(self._bias))


class GeodesicLayer(RiemannianLayer):
    def __init__(self, in_features, out_features, manifold, over_param=False, weight_norm=False):
        super(GeodesicLayer, self).__init__(in_features, out_features, manifold, over_param, weight_norm)

    def forward(self, input):
        # Broadcast the input against every output hyperplane. Note: the expand
        # below assumes a 3-D input, e.g. (n_samples, batch, in_features).
        input = input.unsqueeze(-2).expand(*input.shape[:-(len(input.shape) - 2)], self.out_features, self.in_features)
        res = self.manifold.normdist2plane(input, self.bias, self.weight,
                                           signed=True, norm=self.weight_norm)
        return res


class Linear(nn.Linear):
    def __init__(self, in_features, out_features, **kwargs):
        # Extra keyword arguments (e.g. a manifold) are accepted for interface
        # compatibility with the Riemannian layers and deliberately ignored.
        super(Linear, self).__init__(
            in_features,
            out_features,
        )


class MobiusLayer(RiemannianLayer):
    def __init__(self, in_features, out_features, manifold, over_param=False, weight_norm=False):
        super(MobiusLayer, self).__init__(in_features, out_features, manifold, over_param, weight_norm)

    def forward(self, input):
        res = self.manifold.mobius_matvec(self.weight, input)
        return res


class ExpZero(nn.Module):
    def __init__(self, manifold):
        super(ExpZero, self).__init__()
        self.manifold = manifold

    def forward(self, input):
        return self.manifold.expmap0(input)


class LogZero(nn.Module):
    def __init__(self, manifold):
        super(LogZero, self).__init__()
        self.manifold = manifold

    def forward(self, input):
        return self.manifold.logmap0(input)
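A minimal usage sketch of the layers above, assuming the PoincareBall imported at the top follows the pvae-style wrapper interface (a `PoincareBall(dim, c)` constructor plus the `expmap0`, `logmap0` and `normdist2plane` methods the layers call); the dimensions are illustrative:

# Illustrative only: the PoincareBall constructor signature is assumed, not confirmed here.
if __name__ == '__main__':
    manifold = PoincareBall(dim=8, c=1.0)
    layer = GeodesicLayer(8, 4, manifold)                # signed distances to 4 hyperplanes
    x = manifold.expmap0(torch.randn(16, 32, 8) * 0.1)   # points on the ball, (samples, batch, dim)
    scores = layer(x)                                    # -> (16, 32, 4)
    z = LogZero(manifold)(x)                             # back to the tangent space at the origin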
import sys
import math
import time
import os
import shutil

import torch
import torch.distributions as dist
from torch.autograd import Variable, Function, grad
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
import argparse
import torch.nn as nn
import scipy.sparse as sp


def lexpand(A, *dimensions):
    """Expand tensor, adding new dimensions on left."""
    return A.expand(tuple(dimensions) + A.shape)


def rexpand(A, *dimensions):
    """Expand tensor, adding new dimensions on right."""
    return A.view(A.shape + (1,) * len(dimensions)).expand(A.shape + tuple(dimensions))


def assert_no_nan(name, g):
    if torch.isnan(g).any():
        raise Exception('nans in {}'.format(name))


def assert_no_grad_nan(name, x):
    if x.requires_grad:
        x.register_hook(lambda g: assert_no_nan(name, g))


# Classes
class Constants(object):
    eta = 1e-5
    log2 = math.log(2)
    logpi = math.log(math.pi)
    log2pi = math.log(2 * math.pi)
    logceilc = 88       # largest cuda v s.t. exp(v) < inf
    logfloorc = -104    # smallest cuda v s.t. exp(v) > 0
    invsqrt2pi = 1. / math.sqrt(2 * math.pi)
    sqrthalfpi = math.sqrt(math.pi / 2)


def logsinh(x):
    # torch.log(sinh(x)); valid for x > 0
    return x + torch.log(1 - torch.exp(-2 * x)) - Constants.log2


def logcosh(x):
    # torch.log(cosh(x)); numerically stable form for x > 0
    return x + torch.log(1 + torch.exp(-2 * x)) - Constants.log2
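# Sanity check (illustrative helper, not part of the original file): the stable
# forms above match the direct computation for positive inputs.
def _demo_logsinh_logcosh():
    x = torch.tensor([0.5, 2.0, 10.0])
    assert torch.allclose(logsinh(x), torch.log(torch.sinh(x)), atol=1e-6)
    assert torch.allclose(logcosh(x), torch.log(torch.cosh(x)), atol=1e-6)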
class Arccosh(Function):
    # https://github.com/facebookresearch/poincare-embeddings/blob/master/model.py
    @staticmethod
    def forward(ctx, x):
        ctx.z = torch.sqrt(x * x - 1)
        return torch.log(x + ctx.z)

    @staticmethod
    def backward(ctx, g):
        z = torch.clamp(ctx.z, min=Constants.eta)
        z = g / z
        return z


class Arcsinh(Function):
    @staticmethod
    def forward(ctx, x):
        ctx.z = torch.sqrt(x * x + 1)
        return torch.log(x + ctx.z)

    @staticmethod
    def backward(ctx, g):
        z = torch.clamp(ctx.z, min=Constants.eta)
        z = g / z
        return z
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
    def __init__(self, filename):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # this flush method is needed for python 3 compatibility.
        # this handles the flush command by doing nothing.
        # you might want to specify some extra behavior here.
        pass


class Timer:
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        self.begin = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.elapsed = self.end - self.begin
        self.elapsedH = time.gmtime(self.elapsed)
        print('====> [{}] Time: {:7.3f}s or {}'
              .format(self.name,
                      self.elapsed,
                      time.strftime("%H:%M:%S", self.elapsedH)))
# Functions
def save_vars(vs, filepath):
    """
    Saves variables to the given filepath in a safe manner.
    """
    if os.path.exists(filepath):
        shutil.copyfile(filepath, '{}.old'.format(filepath))
    torch.save(vs, filepath)


def save_model(model, filepath):
    """
    To load a saved model, simply use
    `model.load_state_dict(torch.load('path-to-saved-model'))`.
    """
    save_vars(model.state_dict(), filepath)


def log_mean_exp(value, dim=0, keepdim=False):
    return log_sum_exp(value, dim, keepdim) - math.log(value.size(dim))


def log_sum_exp(value, dim=0, keepdim=False):
    m, _ = torch.max(value, dim=dim, keepdim=True)
    value0 = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))


def log_sum_exp_signs(value, signs, dim=0, keepdim=False):
    m, _ = torch.max(value, dim=dim, keepdim=True)
    value0 = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(signs * torch.exp(value0), dim=dim, keepdim=keepdim))
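# Sanity check (illustrative helper, not part of the original file): the
# max-shifted form agrees with the naive computation on moderate values and
# stays finite where the naive one overflows.
def _demo_log_sum_exp():
    v = torch.tensor([1.0, 2.0, 3.0])
    naive = torch.log(torch.exp(v).sum())
    assert torch.allclose(log_sum_exp(v), naive)
    big = torch.tensor([1000.0, 1000.0])
    assert torch.isfinite(log_sum_exp(big))  # naive version would give inf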
def get_mean_param(params):
    """Return the parameter used to show reconstructions or generations.
    For example, the mean for Normal, or probs for Bernoulli.
    For Bernoulli, skip first parameter, as that's (scalar) temperature
    """
    if params[0].dim() == 0:
        return params[1]
    # elif len(params) == 3:
    #     return params[1]
    else:
        return params[0]


def probe_infnan(v, name, extras={}):
    nps = torch.isnan(v)
    s = nps.sum().item()
    if s > 0:
        print('>>> {} >>>'.format(name))
        print(name, s)
        print(v[nps])
        for k, val in extras.items():
            print(k, val, val.sum().item())
        quit()


def has_analytic_kl(type_p, type_q):
    return (type_p, type_q) in torch.distributions.kl._KL_REGISTRY


def split_data(labels, test_prop, val_prop):
    """Split node indices into class-balanced sets.

    Returns (idx_val, idx_test, idx_train), in that order.
    """
    nb_nodes = labels.shape[0]
    all_idx = np.arange(nb_nodes)
    pos_idx = labels.nonzero()[0]
    neg_idx = (1. - labels).nonzero()[0]
    np.random.shuffle(pos_idx)
    np.random.shuffle(neg_idx)
    pos_idx = pos_idx.tolist()
    neg_idx = neg_idx.tolist()
    nb_pos_neg = min(len(pos_idx), len(neg_idx))
    nb_val = round(val_prop * nb_pos_neg)
    nb_test = round(test_prop * nb_pos_neg)
    idx_val_pos, idx_test_pos, idx_train_pos = (
        pos_idx[:nb_val], pos_idx[nb_val:nb_val + nb_test], pos_idx[nb_val + nb_test:])
    idx_val_neg, idx_test_neg, idx_train_neg = (
        neg_idx[:nb_val], neg_idx[nb_val:nb_val + nb_test], neg_idx[nb_val + nb_test:])
    return idx_val_pos + idx_val_neg, idx_test_pos + idx_test_neg, idx_train_pos + idx_train_neg
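# Usage sketch (illustrative helper, not part of the original file): binary
# labels in, three class-balanced index lists out.
def _demo_split_data():
    labels = np.array([0, 1, 0, 1, 1, 0, 0, 1])
    idx_val, idx_test, idx_train = split_data(labels, test_prop=0.25, val_prop=0.25)
    # each returned list mixes positive and negative node indices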
def process_data(args, adj, features, labels):
    data = process_data_nc(args, adj, features, labels)
    data['adj_train'], data['features'] = process(
        data['adj_train'], data['features'], args.normalize_adj, args.normalize_feats
    )
    return data


def process_data_nc(args, adj, features, labels):
    # split_data returns (val, test, train); unpack in that order.
    idx_val, idx_test, idx_train = split_data(labels, args.test_prop, args.val_prop)
    labels = torch.LongTensor(labels)
    data = {'adj_train': sp.csr_matrix(adj), 'features': features, 'labels': labels,
            'idx_train': idx_train, 'idx_test': idx_test, 'idx_val': idx_val}
    return data


def process(adj, features, normalize_adj, normalize_feats):
    if sp.isspmatrix(features):
        features = np.array(features.todense())
    if normalize_feats:
        features = normalize(features)
    features = torch.Tensor(features)
    if normalize_adj:
        adj = normalize(adj)
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    return adj, features


def normalize(mx):
    """Row-normalize sparse matrix."""
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx


def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo()
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)
    )
    values = torch.Tensor(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
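# Usage sketch (illustrative helper, not part of the original file): round-trip
# a small scipy matrix through the converter.
def _demo_sparse_conversion():
    mx = sp.csr_matrix(np.array([[0., 2.], [1., 0.]]))
    t = sparse_mx_to_torch_sparse_tensor(mx)
    assert torch.allclose(t.to_dense(), torch.tensor([[0., 2.], [1., 0.]]))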
def get_activation(args):
    if args.act == 'leaky_relu':
        return nn.LeakyReLU(args.alpha)
    elif args.act == 'rrelu':
        return nn.RReLU()
    elif args.act == 'relu':
        return nn.ReLU()
    elif args.act == 'elu':
        return nn.ELU()
    elif args.act == 'prelu':
        return nn.PReLU()
    elif args.act == 'selu':
        return nn.SELU()
    else:
        # fail loudly instead of silently returning None
        raise ValueError('unknown activation: {}'.format(args.act))
\ No newline at end of file
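A usage sketch for `get_activation`, assuming the `args` namespace carries `act` and `alpha` fields as in the repository's config; the values below are illustrative:

from argparse import Namespace

act = get_activation(Namespace(act='leaky_relu', alpha=0.2))  # -> nn.LeakyReLU(0.2)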
File added
File added
File added
File added
File added
import numpy as np
import torch
import pickle
from scipy.sparse import csr_matrix
import scipy.sparse as sp


def sparse_to_tuple(sparse_mx):
    if not sp.isspmatrix_coo(sparse_mx):
        sparse_mx = sparse_mx.tocoo()
    coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
    values = sparse_mx.data
    shape = sparse_mx.shape
    return coords, values, shape


def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo()
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)
    )
    values = torch.Tensor(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)


def prepare(t, device, data_path):
    """Load the labels, features and adjacency pickles for snapshot t."""
    path = '{}labels_{}.pkl'.format(data_path, t)
    with open(path, 'rb') as f:
        labels = pickle.load(f)
    path = '{}features_{}.pkl'.format(data_path, t)
    with open(path, 'rb') as f:
        features = pickle.load(f)
    path = '{}adjacency_{}.pkl'.format(data_path, t)
    with open(path, 'rb') as f:
        adj = pickle.load(f)
    adj = sparse_mx_to_torch_sparse_tensor(sp.csr_matrix(adj)).to(device)
    features = torch.from_numpy(features).float().to(device)
    labels = torch.from_numpy(labels).long().to(device)
    return adj, features, labels
\ No newline at end of file
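A usage sketch for `prepare`, assuming pickled snapshots named `labels_<t>.pkl`, `features_<t>.pkl` and `adjacency_<t>.pkl` under `data_path` (which must end with a separator, given the format strings above); the path and snapshot index are illustrative:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
adj, features, labels = prepare(t=0, device=device, data_path='data/reddit/')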
import torch


def cosh(x, clamp=15):
    return x.clamp(-clamp, clamp).cosh()


def sinh(x, clamp=15):
    return x.clamp(-clamp, clamp).sinh()


def tanh(x, clamp=15):
    return x.clamp(-clamp, clamp).tanh()


def arcosh(x):
    return Arcosh.apply(x)


def arsinh(x):
    return Arsinh.apply(x)


def artanh(x):
    return Artanh.apply(x)


class Artanh(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        x = x.clamp(-1 + 1e-15, 1 - 1e-15)
        ctx.save_for_backward(x)
        z = x.double()
        return (torch.log_(1 + z).sub_(torch.log_(1 - z))).mul_(0.5).to(x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        _input, = ctx.saved_tensors
        return grad_output / (1 - _input ** 2)


class Arsinh(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        z = x.double()
        return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        _input, = ctx.saved_tensors
        return grad_output / (1 + _input ** 2) ** 0.5


class Arcosh(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        x = x.clamp(min=1.0 + 1e-10)
        ctx.save_for_backward(x)
        z = x.double()
        return (z + torch.sqrt_(z.pow(2) - 1)).clamp_min_(1e-15).log_().to(x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        _input, = ctx.saved_tensors
        return grad_output / (_input ** 2 - 1) ** 0.5
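A quick verification sketch for the clamped autograd functions above, checking them against PyTorch's built-in inverses away from the domain boundaries; the inputs and tolerances are illustrative:

x = torch.tensor([0.1, 0.5, 0.9], requires_grad=True)
assert torch.allclose(artanh(x), torch.atanh(x), atol=1e-6)
artanh(x).sum().backward()                        # gradient is 1 / (1 - x^2)
assert torch.allclose(x.grad, 1 / (1 - x ** 2))

y = torch.tensor([1.5, 2.0, 10.0])
assert torch.allclose(arcosh(y), torch.acosh(y), atol=1e-6)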
import random
import torch
import numpy as np
import logging

from HVGAE_AD.config import args


def init_logger(log_file=None):
    log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_format)
    logger.handlers = [console_handler]
    # if log_file and log_file != '':
    #     file_handler = logging.FileHandler(log_file)
    #     file_handler.setFormatter(log_format)
    #     logger.addHandler(file_handler)
    return logger


logger = init_logger(args.log_file)


def set_random(random_seed):
    """Seed every RNG the pipeline touches and make cuDNN deterministic."""
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed)
    torch.backends.cudnn.deterministic = True
    # torch.use_deterministic_algorithms(True)
    logger.info('fixed random seed')
\ No newline at end of file
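A usage sketch: calling `set_random` once at the start of a run makes repeated runs comparable; the seed value is illustrative:

set_random(42)
x = torch.randn(3)   # identical across runs with the same seed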