"""
Created on Wed Sep 18 20:27:19 2019
@author: MOHAMEDR002
"""
import numpy as np
import torch
import random
import math
from torch import nn
import torch.nn.functional as F
import torch.distributions as dist
import logging
import os
from datetime import datetime
import sys
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def _logger(logger_name, level=logging.DEBUG):
"""
    Return a custom logger with the given name and level, writing to both
    stdout and a file named after the logger.
"""
logger = logging.getLogger(logger_name)
logger.setLevel(level)
format_string = "%(message)s"
log_format = logging.Formatter(format_string)
# Creating and adding the console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_format)
logger.addHandler(console_handler)
# Creating and adding the file handler
file_handler = logging.FileHandler(logger_name, mode='a')
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
return logger
def starting_logs(data_type, da_method, exp_log_dir, src_id, tgt_id, run_id):
log_dir = os.path.join(exp_log_dir, src_id + "_to_" + tgt_id + "_run_" + str(run_id))
os.makedirs(log_dir, exist_ok=True)
log_file_name = os.path.join(log_dir, f"logs_{da_method}_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log")
logger = _logger(log_file_name)
logger.debug("=" * 45)
logger.debug(f'Dataset: {data_type}')
logger.debug(f'Method: {da_method}')
logger.debug("=" * 45)
logger.debug(f'Source: {src_id} ---> Target: {tgt_id}')
logger.debug(f'Run ID: {run_id}')
logger.debug("=" * 45)
return logger, log_dir
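
# A minimal usage sketch; the dataset name, method name, and domain IDs below
# are hypothetical placeholders, not values fixed by this module:
#
#   logger, log_dir = starting_logs('CMAPSS', 'DA_method', './experiments_logs',
#                                   'FD001', 'FD002', run_id=0)
#   logger.debug('training started')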
def set_requires_grad(model, requires_grad=True):
for param in model.parameters():
param.requires_grad = requires_grad
def loop_iterable(iterable):
while True:
yield from iterable
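
# loop_iterable lets two DataLoaders of different lengths be consumed in
# lock-step, e.g. source/target batches in domain adaptation. A sketch
# (src_loader and tgt_loader are hypothetical DataLoaders):
#
#   tgt_iter = loop_iterable(tgt_loader)
#   for src_batch in src_loader:
#       tgt_batch = next(tgt_iter)  # cycles forever, never raises StopIteration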
def fix_randomness(SEED):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
def scoring_func(error_arr):
    """Asymmetric RUL score: late predictions (positive error) are penalized
    as exp(e/10)-1, early ones as exp(-e/13)-1, summed over the batch."""
pos_error_arr = error_arr[error_arr >= 0]
neg_error_arr = error_arr[error_arr < 0]
score = 0
for error in neg_error_arr:
score = math.exp(-(error / 13)) - 1 + score
for error in pos_error_arr:
score = math.exp(error / 10) - 1 + score
return score
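
# The penalty is asymmetric, so a late prediction (+10 cycles) costs more than
# an early one (-10 cycles), in line with prognostics scoring practice:
#
#   errors = np.array([-10.0, 10.0])   # predicted RUL minus true RUL
#   scoring_func(errors)               # (e^(10/13)-1) + (e^1-1) ≈ 2.88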
def roll(x, shift: int, dim: int = -1, fill_pad: int = None):
    """Roll x along `dim` by `shift` positions; if `fill_pad` is given, the
    wrapped-around entries are filled with that constant instead."""
    if shift == 0:
        return x
    elif shift < 0:
        shift = -shift
        gap = x.index_select(dim, torch.arange(shift, device=x.device))
        if fill_pad is not None:
            gap = fill_pad * torch.ones_like(gap, device=x.device)
        return torch.cat([x.index_select(dim, torch.arange(shift, x.size(dim), device=x.device)), gap], dim=dim)
    else:
        shift = x.size(dim) - shift
        gap = x.index_select(dim, torch.arange(shift, x.size(dim), device=x.device))
        if fill_pad is not None:
            gap = fill_pad * torch.ones_like(gap, device=x.device)
        return torch.cat([gap, x.index_select(dim, torch.arange(shift, device=x.device))], dim=dim)
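
# Example: roll a 1-D tensor one step to the left and pad the gap with zeros.
#
#   x = torch.tensor([1, 2, 3, 4])
#   roll(x, shift=-1, fill_pad=0)  # tensor([2, 3, 4, 0])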
def init_weights(m):
for name, param in m.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
# if name=='weight':
# nn.init.kaiming_uniform_(param.data)
# else:
# torch.nn.init.zeros_(param.data)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
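
# Example: a Linear(14, 1) layer has 14 weights + 1 bias, so
# count_parameters(nn.Linear(14, 1)) returns 15.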
class RMSELoss(nn.Module):
def __init__(self):
super().__init__()
self.mse = nn.MSELoss()
    def forward(self, yhat, y):
        return torch.sqrt(self.mse(yhat, y))
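
# Usage sketch (predictions and targets are placeholder tensors):
#
#   criterion = RMSELoss()
#   loss = criterion(predictions, targets)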
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
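
# Example: epoch_time(100.0, 250.5) returns (2, 30), i.e. 2m 30s.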
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
        self.val_loss_min = np.inf
self.delta = delta
def __call__(self, val_loss, model):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:  # no sufficient improvement
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss, model):
        '''Saves the model when the validation loss decreases.'''
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), 'checkpoint.pt')
self.val_loss_min = val_loss
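
# Typical training-loop usage; validate() is a hypothetical validation step,
# and note that save_checkpoint writes to 'checkpoint.pt' in the current
# working directory:
#
#   early_stopping = EarlyStopping(patience=7, verbose=True)
#   for epoch in range(n_epochs):
#       val_loss = validate(model)
#       early_stopping(val_loss, model)
#       if early_stopping.early_stop:
#           break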
class Stage_Wise_Alignment(nn.Module):
def __init__(self, kernel_mul=2.0, kernel_num=5, fix_sigma=None, **kwargs):
super(Stage_Wise_Alignment, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
        self.fix_sigma = fix_sigma
    def gaussian_kernel(self, source, target, kernel_mul, kernel_num, fix_sigma):
        # Multi-bandwidth RBF (Gaussian) kernel matrix over the concatenation
        # of the source and target batches.
n_samples = int(source.size()[0]) + int(target.size()[0])
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0-total1)**2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i)
for i in range(kernel_num)]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp)
for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def forward(self, source, target):
batch_size = int(source.size()[0])
        kernels = self.gaussian_kernel(
            source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)
XY = torch.mean(kernels[:batch_size, batch_size:])
YX = torch.mean(kernels[batch_size:, :batch_size])
        loss = -(XY + YX)  # only the cross-domain kernel terms are used
return loss
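
# Sketch of aligning two feature batches (shapes are illustrative only).
# Since only the cross-domain kernel means enter the loss, this is the
# negated cross term of an MMD-style objective rather than the full MMD:
#
#   align = Stage_Wise_Alignment(kernel_mul=2.0, kernel_num=5)
#   src_feat = torch.randn(32, 64)
#   tgt_feat = torch.randn(32, 64)
#   loss = align(src_feat, tgt_feat)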
def NIG_NLL(y, gamma, v, alpha, beta, w_i_dis_mean, quantile, reduce=True):
tau_two = 2.0 / (quantile * (1.0 - quantile))
twoBlambda = 2.0 * 2.0 * beta * (1.0 + tau_two * w_i_dis_mean * v)
nll = 0.5 * torch.log(torch.tensor(np.pi) / v) \
- alpha * torch.log(twoBlambda) \
+ (alpha + 0.5) * torch.log(v * (y - gamma) ** 2 + twoBlambda) \
+ torch.lgamma(alpha) \
- torch.lgamma(alpha + 0.5)
return torch.mean(nll) if reduce else nll
def KL_NIG(mu1, v1, a1, b1, mu2, v2, a2, b2):
KL = 0.5 * (a1 - 1) / b1 * (v2 * (mu2 - mu1) ** 2) \
+ 0.5 * v2 / v1 \
- 0.5 * torch.log(torch.abs(v2) / torch.abs(v1)) \
- 0.5 + a2 * torch.log(b1 / b2) \
- (torch.lgamma(a1) - torch.lgamma(a2)) \
+ (a1 - a2) * torch.digamma(a1) \
- (b1 - b2) * a1 / b1
return KL
def tilted_loss(q, e):
return torch.maximum(q * e, (q - 1) * e)
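
# tilted_loss is the standard pinball (quantile) loss: for quantile q and
# error e, it returns q*e when e >= 0 and (q-1)*e otherwise.
#
#   tilted_loss(0.9, torch.tensor([1.0, -1.0]))  # tensor([0.9000, 0.1000])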
def NIG_Reg(y, gamma, v, alpha, beta, w_i_dis_mean, quantile, omega=0.01, reduce=True, kl=False):
tau_two = 2.0 / (quantile * (1.0 - quantile))
error = tilted_loss(quantile, y - gamma)
if kl:
kl = KL_NIG(gamma, v, alpha, beta, gamma, omega, 1 + omega, beta)
reg = error * kl
else:
evi = 2 * v + alpha + 1 / beta
reg = error * evi
return torch.mean(reg) if reduce else reg
def quant_evi_loss(y_true, gamma, v, alpha, beta, quantile, coeff=1.0, reduce=True):
theta = (1.0 - 2.0 * quantile) / (quantile * (1.0 - quantile))
mean_ = beta / (alpha - 1)
w_i_dis = dist.Exponential(rate=1 / mean_)
w_i_dis_mean = w_i_dis.mean
mu = gamma + theta * w_i_dis_mean
loss_nll = NIG_NLL(y_true, mu, v, alpha, beta, w_i_dis_mean, quantile, reduce=reduce)
loss_reg = NIG_Reg(y_true, gamma, v, alpha, beta, w_i_dis_mean, quantile, reduce=reduce)
return loss_nll + coeff * loss_reg
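
# Sketch of a call with hypothetical evidential-head outputs. The evidential
# parameters must satisfy v > 0, beta > 0, and alpha > 1 (so that
# beta / (alpha - 1), the Exponential mean, is positive), e.g. via softplus:
#
#   gamma = torch.randn(32, 1)
#   v = F.softplus(torch.randn(32, 1))
#   alpha = F.softplus(torch.randn(32, 1)) + 1.01
#   beta = F.softplus(torch.randn(32, 1))
#   loss = quant_evi_loss(torch.randn(32, 1), gamma, v, alpha, beta, quantile=0.5)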
def quant_evi_loss_upt(y_true, gamma, v, alpha, beta, quantile, coeff=1.0, reduce=True):
    # Variant of quant_evi_loss in which gamma is detached from the graph,
    # so this loss updates only the evidential parameters (v, alpha, beta)
    # while the quantile prediction gamma is trained elsewhere.
    with torch.no_grad():
        gamma = gamma.clone()
    loss_one = quant_evi_loss(y_true, gamma.detach(), v, alpha, beta, quantile, coeff=coeff, reduce=False)
    error_loss = tilted_loss(quantile, y_true - gamma)
    reg = 1e-2 * (error_loss + loss_one)
    return torch.mean(reg) if reduce else reg
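
# quant_evi_loss_upt takes the same arguments as quant_evi_loss; the
# difference is that gamma is detached here, so gradients from this loss
# update only (v, alpha, beta) and gamma must be trained by a separate loss.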