Dataset schema (column name, type, observed value range):
  repo             string   (length 2–99)
  file             string   (length 14–239)
  code             string   (length 20–3.99M)
  file_length      int64    (20–3.99M)
  avg_line_length  float64  (9.73–128)
  max_line_length  int64    (11–86.4k)
  extension_type   string   (1 distinct value)
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/codecs/text_codec/tokenize.py
import torch
import torch.nn as nn
from image_synthesis.modeling.modules.clip.clip import tokenize
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from image_synthesis.utils.misc import instantiate_from_config


class Tokenize(BaseCodec):
    def __init__(self,
                 context_length: int = 256,
                 add_start_and_end: bool = False,
                 just_token=False,
                 with_mask: bool = True,
                 pad_value: int = 0,
                 clip_embedding=False,
                 condition_emb_config=None,
                 tokenizer_config={
                     'target': 'image_synthesis.modeling.modules.clip.simple_tokenizer.SimpleTokenizer',
                     'params': {
                         'end_idx': 49152  # 16384 for DALL-E
                     },
                 },
                 ):
        """
        This is a wrapper class for tokenizing text.
        For CLIP and DALLE-pytorch tokenize, the default arguments are different:

        CLIP based:
            context_length: 77
            add_start_and_end: True

        DALLE-pytorch based:
            context_length: 256
            add_start_and_end: False
        """
        super().__init__()
        self.context_length = context_length
        self.add_start_and_end = add_start_and_end
        self.with_mask = with_mask
        self.pad_value = pad_value
        self.just_token = just_token
        self.trainable = False
        self.condition_emb = None
        self.clip_embedding = clip_embedding
        if self.clip_embedding == True:
            assert condition_emb_config != None
            self.condition_emb = instantiate_from_config(condition_emb_config)

        self.tokenizer = instantiate_from_config(tokenizer_config)

    def __repr__(self):
        rep = "Tokenize for text\n\tcontext_length: {}\n\tadd_start_and_end: {}\n\twith_mask: {}"\
            .format(self.context_length, self.add_start_and_end, self.with_mask)
        return rep

    def check_length(self, token):
        return len(token) <= self.context_length

    def get_tokens(self, text, **kwargs):
        text_token = tokenize(text, context_length=self.context_length,
                              add_start_and_end=self.add_start_and_end,
                              with_mask=self.with_mask, pad_value=self.pad_value,
                              tokenizer=self.tokenizer,
                              just_token=self.just_token)
        if self.clip_embedding == False:
            return text_token
        else:
            if self.condition_emb.additional_last_embedding == True:
                with torch.no_grad():
                    cond_emb, last_embedding = self.condition_emb(text_token['token'].cuda())
                    text_token['embed_token'] = cond_emb.detach()
                    text_token['last_embed'] = last_embedding
            else:
                with torch.no_grad():
                    cond_emb = self.condition_emb(text_token['token'].cuda())
                    text_token['embed_token'] = cond_emb.detach()
            return text_token
file_length: 3,124 | avg_line_length: 36.202381 | max_line_length: 104 | extension_type: py
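A minimal standalone sketch of the context-length padding and masking behaviour the wrapper above relies on. The helper name pad_to_context, the token ids, and the special-token values are illustrative only; they stand in for the repository's CLIP BPE tokenizer.

import torch

def pad_to_context(tokens, context_length=77, add_start_and_end=True,
                   sot=49406, eot=49407, pad_value=0):
    # optionally wrap with start/end markers, truncate, build a boolean mask, then pad
    if add_start_and_end:
        tokens = [sot] + tokens + [eot]
    tokens = tokens[:context_length]
    mask = [1] * len(tokens) + [0] * (context_length - len(tokens))
    tokens = tokens + [pad_value] * (context_length - len(tokens))
    return {'token': torch.tensor([tokens]),                     # 1 x L
            'mask': torch.tensor([mask], dtype=torch.bool)}      # 1 x L

out = pad_to_context([12, 345, 6789])                            # CLIP-style defaults
print(out['token'].shape, out['mask'].sum().item())              # torch.Size([1, 77]) 5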
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/models/conditional_dalle.py
# VQ-Diffusion import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import time import numpy as np from PIL import Image import os from torch.cuda.amp import autocast class C_DALLE(nn.Module): def __init__( self, *, content_info={'key': 'image'}, condition_info={'key': 'label'}, guidance_scale=1.0, learnable_cf=False, content_codec_config, diffusion_config ): super().__init__() self.content_info = content_info self.condition_info = condition_info self.guidance_scale = guidance_scale self.content_codec = instantiate_from_config(content_codec_config) self.transformer = instantiate_from_config(diffusion_config) self.truncation_forward = False def parameters(self, recurse=True, name=None): # return super().parameters(recurse=True) if name is None or name == 'none': return super().parameters(recurse=recurse) else: names = name.split('+') params = [] for n in names: try: # the parameters() method is not overwritten for some classes params += getattr(self, name).parameters(recurse=recurse, name=name) except: params += getattr(self, name).parameters(recurse=recurse) return params @property def device(self): return self.transformer.device def get_ema_model(self): return self.transformer @torch.no_grad() def prepare_condition(self, batch): cond_key = self.condition_info['key'] cond = batch[cond_key] if torch.is_tensor(cond): cond = cond.to(self.device) cond_ = {} cond_['condition_token'] = cond return cond_ @autocast(enabled=False) @torch.no_grad() def prepare_content(self, batch, with_mask=False): cont_key = self.content_info['key'] cont = batch[cont_key] if torch.is_tensor(cont): cont = cont.to(self.device) if not with_mask: cont = self.content_codec.get_tokens(cont) else: mask = batch['mask'.format(cont_key)] cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v return cont_ @torch.no_grad() def prepare_input(self, batch): input = self.prepare_condition(batch) input.update(self.prepare_content(batch)) return input def predict_start_with_truncation(self, func, sample_type): if sample_type[-1] == 'p': truncation_k = int(sample_type[:-1].replace('top', '')) content_codec = self.content_codec save_path = self.this_save_path def wrapper(*args, **kwards): out = func(*args, **kwards) val, ind = out.topk(k = truncation_k, dim=1) probs = torch.full_like(out, -70) probs.scatter_(1, ind, val) return probs return wrapper elif sample_type[-1] == 'r': truncation_r = float(sample_type[:-1].replace('top', '')) def wrapper(*args, **kwards): out = func(*args, **kwards) temp, indices = torch.sort(out, 1, descending=True) temp1 = torch.exp(temp) temp2 = temp1.cumsum(dim=1) temp3 = temp2 < truncation_r new_temp = torch.full_like(temp3[:,0:1,:], True) temp6 = torch.cat((new_temp, temp3), dim=1) temp3 = temp6[:,:-1,:] temp4 = temp3.gather(1, indices.argsort(1)) temp5 = temp4.float()*out+(1-temp4.float())*(-70) probs = temp5 return probs return wrapper else: print("wrong sample type") @torch.no_grad() def generate_content( self, *, batch, condition=None, filter_ratio = 0.5, temperature = 1.0, content_ratio = 0.0, replicate=1, return_att_weight=False, sample_type="normal", ): self.eval() if type(batch['label']) == list: batch['label']=torch.tensor(batch['label']) if condition is None: condition = self.prepare_condition(batch=batch) else: condition = self.prepare_condition(batch=None, condition=condition) # content = None if replicate != 
1: for k in condition.keys(): if condition[k] is not None: condition[k] = torch.cat([condition[k] for _ in range(replicate)], dim=0) content_token = None guidance_scale = self.guidance_scale cf_cond_emb = torch.ones(len(batch['label']) * replicate).to(self.device) * 1000 def cf_predict_start(log_x_t, cond_emb, t): log_x_recon = self.transformer.predict_start(log_x_t, cond_emb, t)[:, :-1] if abs(guidance_scale - 1) < 1e-3: return torch.cat((log_x_recon, self.transformer.zero_vector), dim=1) cf_log_x_recon = self.transformer.predict_start(log_x_t, cf_cond_emb.type_as(cond_emb), t)[:, :-1] log_new_x_recon = cf_log_x_recon + guidance_scale * (log_x_recon - cf_log_x_recon) log_new_x_recon -= torch.logsumexp(log_new_x_recon, dim=1, keepdim=True) log_new_x_recon = log_new_x_recon.clamp(-70, 0) log_pred = torch.cat((log_new_x_recon, self.transformer.zero_vector), dim=1) return log_pred if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False: self.transformer.cf_predict_start = self.predict_start_with_truncation(cf_predict_start, sample_type.split(',')[0]) self.truncation_forward = True trans_out = self.transformer.sample(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=filter_ratio, temperature=temperature, return_att_weight=return_att_weight, return_logits=False, print_log=False, sample_type=sample_type) content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256) self.train() out = { 'content': content } return out @torch.no_grad() def reconstruct( self, input ): if torch.is_tensor(input): input = input.to(self.device) cont = self.content_codec.get_tokens(input) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v rec = self.content_codec.decode(cont_['content_token']) return rec @torch.no_grad() def sample( self, batch, clip = None, temperature = 1., return_rec = True, filter_ratio = [0, 0.5, 1.0], content_ratio = [1], # the ratio to keep the encoded content tokens return_att_weight=False, return_logits=False, sample_type="normal", **kwargs, ): self.eval() condition = self.prepare_condition(batch) content = self.prepare_content(batch) content_samples = {'input_image': batch[self.content_info['key']]} if return_rec: content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token']) # import pdb; pdb.set_trace() for fr in filter_ratio: for cr in content_ratio: num_content_tokens = int((content['content_token'].shape[1] * cr)) if num_content_tokens < 0: continue else: content_token = content['content_token'][:, :num_content_tokens] if sample_type == 'debug': trans_out = self.transformer.sample_debug(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=fr, temperature=temperature, return_att_weight=return_att_weight, return_logits=return_logits, content_logits=content.get('content_logits', None), sample_type=sample_type, **kwargs) else: trans_out = self.transformer.sample(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=fr, temperature=temperature, return_att_weight=return_att_weight, 
return_logits=return_logits, content_logits=content.get('content_logits', None), sample_type=sample_type, **kwargs) content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token']) if return_att_weight: content_samples['cond1_cont{}_fr{}_image_condition_attention'.format(cr, fr)] = trans_out['condition_attention'] # B x Lt x Ld content_att = trans_out['content_attention'] shape = *content_att.shape[:-1], self.content.token_shape[0], self.content.token_shape[1] content_samples['cond1_cont{}_fr{}_image_content_attention'.format(cr, fr)] = content_att.view(*shape) # B x Lt x Lt -> B x Lt x H x W if return_logits: content_samples['logits'] = trans_out['logits'] self.train() output = {'condition': batch[self.condition_info['key']]} output.update(content_samples) return output def forward( self, batch, name='none', **kwargs ): input = self.prepare_input(batch) output = self.transformer(input, **kwargs) return output
file_length: 11,968 | avg_line_length: 40.559028 | max_line_length: 154 | extension_type: py
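The heart of cf_predict_start above is a classifier-free guidance mix of conditional and unconditional log-probabilities, followed by renormalisation and clamping. A standalone sketch of just that combination step on dummy tensors; the function name and shapes are illustrative, and the real method additionally re-appends the transformer's zero_vector column for the mask class.

import torch
import torch.nn.functional as F

def guided_log_probs(log_cond, log_uncond, guidance_scale):
    # log p_guided = log p_uncond + s * (log p_cond - log p_uncond), renormalised over classes
    log_new = log_uncond + guidance_scale * (log_cond - log_uncond)
    log_new = log_new - torch.logsumexp(log_new, dim=1, keepdim=True)
    return log_new.clamp(-70, 0)

B, K, L = 2, 16, 8                                   # batch, codebook classes, token positions
log_cond = F.log_softmax(torch.randn(B, K, L), dim=1)
log_uncond = F.log_softmax(torch.randn(B, K, L), dim=1)
out = guided_log_probs(log_cond, log_uncond, guidance_scale=3.0)
print(torch.allclose(out.exp().sum(dim=1), torch.ones(B, L), atol=1e-4))   # True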
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/models/unconditional_dalle.py
# VQ-Diffusion import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import time import numpy as np from PIL import Image import os from torch.cuda.amp import autocast class UC_DALLE(nn.Module): def __init__( self, *, content_info={'key': 'image'}, content_codec_config, diffusion_config ): super().__init__() self.content_info = content_info self.content_codec = instantiate_from_config(content_codec_config) self.transformer = instantiate_from_config(diffusion_config) self.truncation_forward = False def parameters(self, recurse=True, name=None): if name is None or name == 'none': return super().parameters(recurse=recurse) else: names = name.split('+') params = [] for n in names: try: # the parameters() method is not overwritten for some classes params += getattr(self, name).parameters(recurse=recurse, name=name) except: params += getattr(self, name).parameters(recurse=recurse) return params @property def device(self): return self.transformer.device def get_ema_model(self): return self.transformer @autocast(enabled=False) @torch.no_grad() def prepare_content(self, batch, with_mask=False): cont_key = self.content_info['key'] cont = batch[cont_key] if torch.is_tensor(cont): cont = cont.to(self.device) if not with_mask: cont = self.content_codec.get_tokens(cont) else: mask = batch['mask'.format(cont_key)] cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v return cont_ @torch.no_grad() def prepare_input(self, batch): input = self.prepare_content(batch) return input def predict_start_with_truncation(self, func, sample_type): if sample_type[-1] == 'p': truncation_k = int(sample_type[:-1].replace('top', '')) content_codec = self.content_codec save_path = self.this_save_path def wrapper(*args, **kwards): out = func(*args, **kwards) val, ind = out.topk(k = truncation_k, dim=1) probs = torch.full_like(out, -70) probs.scatter_(1, ind, val) return probs return wrapper elif sample_type[-1] == 'r': truncation_r = float(sample_type[:-1].replace('top', '')) def wrapper(*args, **kwards): out = func(*args, **kwards) temp, indices = torch.sort(out, 1, descending=True) temp1 = torch.exp(temp) temp2 = temp1.cumsum(dim=1) temp3 = temp2 < truncation_r new_temp = torch.full_like(temp3[:,0:1,:], True) temp6 = torch.cat((new_temp, temp3), dim=1) temp3 = temp6[:,:-1,:] temp4 = temp3.gather(1, indices.argsort(1)) temp5 = temp4.float()*out+(1-temp4.float())*(-70) probs = temp5 return probs return wrapper else: print("wrong sample type") @torch.no_grad() def generate_content( self, *, batch, filter_ratio = 0.5, temperature = 1.0, content_ratio = 0.0, replicate=1, return_att_weight=False, sample_type="normal", ): self.eval() content_token = None if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False: self.transformer.predict_start = self.predict_start_with_truncation(self.transformer.predict_start, sample_type.split(',')[0]) self.truncation_forward = True trans_out = self.transformer.sample(condition_token=None, condition_mask=None, condition_embed=None, content_token=content_token, filter_ratio=filter_ratio, temperature=temperature, return_att_weight=return_att_weight, return_logits=False, print_log=False, sample_type=sample_type, batch_size=replicate) content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256) self.train() out = { 'content': content } return out @torch.no_grad() def 
reconstruct( self, input ): if torch.is_tensor(input): input = input.to(self.device) cont = self.content_codec.get_tokens(input) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v rec = self.content_codec.decode(cont_['content_token']) return rec @torch.no_grad() def sample( self, batch, clip = None, temperature = 1., return_rec = True, filter_ratio = [0], content_ratio = [1], # the ratio to keep the encoded content tokens return_att_weight=False, return_logits=False, sample_type="normal", **kwargs, ): self.eval() content = self.prepare_content(batch) content_samples = {'input_image': batch[self.content_info['key']]} if return_rec: content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token']) # import pdb; pdb.set_trace() for fr in filter_ratio: for cr in content_ratio: num_content_tokens = int((content['content_token'].shape[1] * cr)) if num_content_tokens < 0: continue else: content_token = content['content_token'][:, :num_content_tokens] trans_out = self.transformer.sample(condition_token=None, condition_mask=None, condition_embed=None, content_token=content_token, filter_ratio=fr, temperature=temperature, return_att_weight=return_att_weight, return_logits=return_logits, content_logits=content.get('content_logits', None), sample_type=sample_type, batch_size=batch[self.content_info['key']].shape[0], **kwargs) content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token']) if return_logits: content_samples['logits'] = trans_out['logits'] self.train() output = {} output.update(content_samples) return output def forward( self, batch, name='none', **kwargs ): input = self.prepare_input(batch) output = self.transformer(input, **kwargs) return output
file_length: 8,216 | avg_line_length: 35.52 | max_line_length: 138 | extension_type: py
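predict_start_with_truncation, shared by the conditional and unconditional models above, wraps the denoiser so that only the top-k classes keep their log-probability and everything else is floored at -70 (numerically zero probability). A standalone sketch of that 'top...p' branch; the helper name is illustrative.

import torch
import torch.nn.functional as F

def truncate_top_k(log_probs, k):
    # keep the k largest log-probabilities per position, floor the rest at -70
    val, ind = log_probs.topk(k=k, dim=1)
    out = torch.full_like(log_probs, -70.0)
    out.scatter_(1, ind, val)
    return out

log_probs = F.log_softmax(torch.randn(2, 16, 8), dim=1)   # B x classes x positions
trunc = truncate_top_k(log_probs, k=4)
print((trunc > -70).sum(dim=1))                           # 4 surviving classes per position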
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/models/dalle.py
# VQ-Diffusion import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import time import numpy as np from PIL import Image import os from torch.cuda.amp import autocast class DALLE(nn.Module): def __init__( self, *, content_info={'key': 'image'}, condition_info={'key': 'text'}, learnable_cf=False, content_codec_config, condition_codec_config, diffusion_config ): super().__init__() self.content_info = content_info self.condition_info = condition_info self.guidance_scale = 1.0 self.learnable_cf = learnable_cf self.content_codec = instantiate_from_config(content_codec_config) self.condition_codec = instantiate_from_config(condition_codec_config) self.transformer = instantiate_from_config(diffusion_config) self.truncation_forward = False def parameters(self, recurse=True, name=None): if name is None or name == 'none': return super().parameters(recurse=recurse) else: names = name.split('+') params = [] for n in names: try: # the parameters() method is not overwritten for some classes params += getattr(self, name).parameters(recurse=recurse, name=name) except: params += getattr(self, name).parameters(recurse=recurse) return params @property def device(self): return self.transformer.device def get_ema_model(self): return self.transformer @torch.no_grad() def prepare_condition(self, batch, condition=None): cond_key = self.condition_info['key'] cond = batch[cond_key] if condition is None else condition if torch.is_tensor(cond): cond = cond.to(self.device) cond = self.condition_codec.get_tokens(cond) cond_ = {} for k, v in cond.items(): v = v.to(self.device) if torch.is_tensor(v) else v cond_['condition_' + k] = v return cond_ @autocast(enabled=False) @torch.no_grad() def prepare_content(self, batch, with_mask=False): cont_key = self.content_info['key'] cont = batch[cont_key] if torch.is_tensor(cont): cont = cont.to(self.device) if not with_mask: cont = self.content_codec.get_tokens(cont) else: mask = batch['mask'.format(cont_key)] cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v return cont_ @autocast(enabled=False) @torch.no_grad() def prepare_input(self, batch): input = self.prepare_condition(batch) input.update(self.prepare_content(batch)) return input def p_sample_with_truncation(self, func, sample_type): truncation_rate = float(sample_type.replace('q', '')) def wrapper(*args, **kwards): out = func(*args, **kwards) import random if random.random() < truncation_rate: out = func(out, args[1], args[2], **kwards) return out return wrapper def predict_start_with_truncation(self, func, sample_type): if sample_type[-1] == 'p': truncation_k = int(sample_type[:-1].replace('top', '')) content_codec = self.content_codec save_path = self.this_save_path def wrapper(*args, **kwards): out = func(*args, **kwards) val, ind = out.topk(k = truncation_k, dim=1) probs = torch.full_like(out, -70) probs.scatter_(1, ind, val) return probs return wrapper elif sample_type[-1] == 'r': truncation_r = float(sample_type[:-1].replace('top', '')) def wrapper(*args, **kwards): out = func(*args, **kwards) # notice for different batches, out are same, we do it on out[0] temp, indices = torch.sort(out, 1, descending=True) temp1 = torch.exp(temp) temp2 = temp1.cumsum(dim=1) temp3 = temp2 < truncation_r new_temp = torch.full_like(temp3[:,0:1,:], True) temp6 = torch.cat((new_temp, temp3), dim=1) temp3 = temp6[:,:-1,:] temp4 = temp3.gather(1, 
indices.argsort(1)) temp5 = temp4.float()*out+(1-temp4.float())*(-70) probs = temp5 return probs return wrapper else: print("wrong sample type") @torch.no_grad() def generate_content( self, *, batch, condition=None, filter_ratio = 0.5, temperature = 1.0, content_ratio = 0.0, replicate=1, return_att_weight=False, sample_type="top0.85r", ): self.eval() if condition is None: condition = self.prepare_condition(batch=batch) else: condition = self.prepare_condition(batch=None, condition=condition) batch_size = len(batch['text']) * replicate if self.learnable_cf: cf_cond_emb = self.transformer.empty_text_embed.unsqueeze(0).repeat(batch_size, 1, 1) else: batch['text'] = [''] * batch_size cf_condition = self.prepare_condition(batch=batch) cf_cond_emb = self.transformer.condition_emb(cf_condition['condition_token']).float() def cf_predict_start(log_x_t, cond_emb, t): log_x_recon = self.transformer.predict_start(log_x_t, cond_emb, t)[:, :-1] if abs(self.guidance_scale - 1) < 1e-3: return torch.cat((log_x_recon, self.transformer.zero_vector), dim=1) cf_log_x_recon = self.transformer.predict_start(log_x_t, cf_cond_emb.type_as(cond_emb), t)[:, :-1] log_new_x_recon = cf_log_x_recon + self.guidance_scale * (log_x_recon - cf_log_x_recon) log_new_x_recon -= torch.logsumexp(log_new_x_recon, dim=1, keepdim=True) log_new_x_recon = log_new_x_recon.clamp(-70, 0) log_pred = torch.cat((log_new_x_recon, self.transformer.zero_vector), dim=1) return log_pred if replicate != 1: for k in condition.keys(): if condition[k] is not None: condition[k] = torch.cat([condition[k] for _ in range(replicate)], dim=0) content_token = None if len(sample_type.split(',')) > 1: if sample_type.split(',')[1][:1]=='q': self.transformer.p_sample = self.p_sample_with_truncation(self.transformer.p_sample, sample_type.split(',')[1]) if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False: self.transformer.cf_predict_start = self.predict_start_with_truncation(cf_predict_start, sample_type.split(',')[0]) self.truncation_forward = True if len(sample_type.split(',')) == 2 and sample_type.split(',')[1][:4]=='time' and int(float(sample_type.split(',')[1][4:])) >= 2: trans_out = self.transformer.sample_fast(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=filter_ratio, temperature=temperature, return_att_weight=return_att_weight, return_logits=False, print_log=False, sample_type=sample_type, skip_step=int(float(sample_type.split(',')[1][4:])-1)) else: if 'time' in sample_type and float(sample_type.split(',')[1][4:]) < 1: self.transformer.prior_ps = int(1024 // self.transformer.num_timesteps * float(sample_type.split(',')[1][4:])) if self.transformer.prior_rule == 0: self.transformer.prior_rule = 1 self.transformer.update_n_sample() trans_out = self.transformer.sample(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=filter_ratio, temperature=temperature, return_att_weight=return_att_weight, return_logits=False, print_log=False, sample_type=sample_type) content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256) self.train() out = { 'content': content } return out @torch.no_grad() def reconstruct( self, input ): if torch.is_tensor(input): input = input.to(self.device) cont = 
self.content_codec.get_tokens(input) cont_ = {} for k, v in cont.items(): v = v.to(self.device) if torch.is_tensor(v) else v cont_['content_' + k] = v rec = self.content_codec.decode(cont_['content_token']) return rec @torch.no_grad() def sample( self, batch, clip = None, temperature = 1., return_rec = True, filter_ratio = [0, 0.5, 1.0], content_ratio = [1], # the ratio to keep the encoded content tokens return_att_weight=False, return_logits=False, sample_type="normal", **kwargs, ): self.eval() condition = self.prepare_condition(batch) content = self.prepare_content(batch) content_samples = {'input_image': batch[self.content_info['key']]} if return_rec: content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token']) for fr in filter_ratio: for cr in content_ratio: num_content_tokens = int((content['content_token'].shape[1] * cr)) if num_content_tokens < 0: continue else: content_token = content['content_token'][:, :num_content_tokens] if sample_type == 'debug': trans_out = self.transformer.sample_debug(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=fr, temperature=temperature, return_att_weight=return_att_weight, return_logits=return_logits, content_logits=content.get('content_logits', None), sample_type=sample_type, **kwargs) else: trans_out = self.transformer.sample(condition_token=condition['condition_token'], condition_mask=condition.get('condition_mask', None), condition_embed=condition.get('condition_embed_token', None), content_token=content_token, filter_ratio=fr, temperature=temperature, return_att_weight=return_att_weight, return_logits=return_logits, content_logits=content.get('content_logits', None), sample_type=sample_type, **kwargs) content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token']) if return_att_weight: content_samples['cond1_cont{}_fr{}_image_condition_attention'.format(cr, fr)] = trans_out['condition_attention'] # B x Lt x Ld content_att = trans_out['content_attention'] shape = *content_att.shape[:-1], self.content.token_shape[0], self.content.token_shape[1] content_samples['cond1_cont{}_fr{}_image_content_attention'.format(cr, fr)] = content_att.view(*shape) # B x Lt x Lt -> B x Lt x H x W if return_logits: content_samples['logits'] = trans_out['logits'] self.train() output = {'condition': batch[self.condition_info['key']]} output.update(content_samples) return output def forward( self, batch, name='none', **kwargs ): input = self.prepare_input(batch) output = self.transformer(input, **kwargs) return output
file_length: 14,512 | avg_line_length: 43.246951 | max_line_length: 154 | extension_type: py
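The 'top...r' branch of predict_start_with_truncation above is a nucleus-style truncation: classes are sorted by probability, kept until the cumulative mass reaches the threshold, and the rest are floored at -70. A standalone sketch with dummy log-probabilities; the function name is illustrative, and 0.85 mirrors the default sample_type "top0.85r".

import torch
import torch.nn.functional as F

def truncate_top_p(log_probs, truncation_r=0.85):
    sorted_lp, indices = torch.sort(log_probs, dim=1, descending=True)
    cum = sorted_lp.exp().cumsum(dim=1)
    keep_sorted = cum < truncation_r
    # always keep the most probable class, then shift the mask down by one position
    keep_sorted = torch.cat((torch.ones_like(keep_sorted[:, :1, :]), keep_sorted[:, :-1, :]), dim=1)
    keep = keep_sorted.gather(1, indices.argsort(1))      # back to original class order
    return keep.float() * log_probs + (1 - keep.float()) * (-70)

log_probs = F.log_softmax(torch.randn(2, 16, 8), dim=1)
out = truncate_top_p(log_probs, 0.85)
print(out.exp().sum(dim=1).min().item() >= 0.85 - 1e-4)   # True: kept mass covers the threshold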
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/embeddings/class_embedding.py
import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding


class ClassEmbedding(BaseEmbedding):
    def __init__(self,
                 num_embed=1000,
                 embed_dim=512,
                 identity=False,
                 trainable=True,
                 ):
        super().__init__()
        self.identity = identity
        self.trainable = trainable
        self.num_embed = num_embed
        self.embed_dim = embed_dim

        if self.identity == False:
            self.emb = nn.Embedding(self.num_embed, embed_dim)
            self._set_trainable()

    def forward(self, index, **kwargs):
        """
        index: B x L, index
        mask: B x L, bool type. False indicates a padded index
        """
        if self.identity == True:
            return index
        else:
            emb = self.emb(index).unsqueeze(1)
            return emb
file_length: 899 | avg_line_length: 26.272727 | max_line_length: 74 | extension_type: py
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/embeddings/dalle_mask_image_embedding.py
import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding


class DalleMaskImageEmbedding(BaseEmbedding):
    def __init__(self,
                 num_embed=8192,
                 spatial_size=[32, 32],  # height and width
                 embed_dim=3968,
                 trainable=True,
                 pos_emb_type='embedding'
                 ):
        super().__init__()

        if isinstance(spatial_size, int):
            spatial_size = [spatial_size, spatial_size]

        self.spatial_size = spatial_size
        self.num_embed = num_embed + 1
        self.embed_dim = embed_dim
        self.trainable = trainable
        self.pos_emb_type = pos_emb_type

        assert self.pos_emb_type in ['embedding', 'parameter']

        self.emb = nn.Embedding(self.num_embed, embed_dim)
        if self.pos_emb_type == 'embedding':
            self.height_emb = nn.Embedding(self.spatial_size[0], embed_dim)  # height
            self.width_emb = nn.Embedding(self.spatial_size[1], embed_dim)   # width
        else:
            self.height_emb = nn.Parameter(torch.zeros(1, self.spatial_size[0], embed_dim))  # height, 32 x 1024
            self.width_emb = nn.Parameter(torch.zeros(1, self.spatial_size[1], embed_dim))   # width, 32 x 1024
        self._set_trainable()

    def forward(self, index, **kwargs):
        assert index.dim() == 2  # B x L
        try:
            index[index < 0] = 0
            emb = self.emb(index)
        except:
            raise RuntimeError('IndexError: index out of range in self, max index {}, num embed {}'.format(index.max(), self.num_embed))

        # add col and row embedding
        if emb.shape[1] > 0:
            if self.pos_emb_type == 'embedding':
                height_emb = self.height_emb(torch.arange(self.spatial_size[0], device=index.device).view(1, self.spatial_size[0])).unsqueeze(2)  # 1 x H x D -> 1 x H x 1 x D
                width_emb = self.width_emb(torch.arange(self.spatial_size[1], device=index.device).view(1, self.spatial_size[1])).unsqueeze(1)    # 1 x W x D -> 1 x 1 x W x D
            else:
                height_emb = self.height_emb.unsqueeze(2)  # 1 x H x D -> 1 x H x 1 x D
                width_emb = self.width_emb.unsqueeze(1)    # 1 x W x D -> 1 x 1 x W x D
            pos_emb = (height_emb + width_emb).view(1, self.spatial_size[0] * self.spatial_size[1], -1)  # 1 x H x W x D -> 1 x L x D
            emb = emb + pos_emb[:, :emb.shape[1], :]

        return emb
file_length: 2,507 | avg_line_length: 42.241379 | max_line_length: 173 | extension_type: py
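The positional term above is built by broadcasting a height embedding against a width embedding and flattening to sequence order. A standalone sketch with small illustrative sizes:

import torch
import torch.nn as nn

H, W, D = 4, 4, 8                                          # small illustrative sizes
height_emb = nn.Embedding(H, D)
width_emb = nn.Embedding(W, D)

h = height_emb(torch.arange(H).view(1, H)).unsqueeze(2)    # 1 x H x 1 x D
w = width_emb(torch.arange(W).view(1, W)).unsqueeze(1)     # 1 x 1 x W x D
pos_emb = (h + w).view(1, H * W, D)                        # 1 x L x D, with L = H*W

# grid position (row i, col j) maps to flat index i*W + j
i, j = 2, 3
expected = height_emb.weight[i] + width_emb.weight[j]
print(torch.allclose(pos_emb[0, i * W + j], expected))     # True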
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/embeddings/base_embedding.py
import torch
from torch import nn


class BaseEmbedding(nn.Module):

    def get_loss(self):
        return None

    def forward(self, **kwargs):
        raise NotImplementedError

    def train(self, mode=True):
        self.training = mode
        if self.trainable and mode:
            super().train()
        return self

    def _set_trainable(self):
        if not self.trainable:
            for pn, p in self.named_parameters():
                p.requires_grad = False
            self.eval()
file_length: 507 | avg_line_length: 19.32 | max_line_length: 49 | extension_type: py
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/embeddings/clip_text_embedding.py
import torch
import torch.nn as nn

from image_synthesis.modeling.modules.clip import clip
from image_synthesis.modeling.modules.clip import model as clip_model
from .base_embedding import BaseEmbedding


class CLIPTextEmbedding(BaseEmbedding):
    def __init__(self,
                 clip_name='ViT-B/32',
                 num_embed=49408,
                 normalize=True,
                 pick_last_embedding=True,
                 keep_seq_len_dim=False,
                 additional_last_embedding=False,
                 embed_dim=1024,
                 ):
        super().__init__()
        self.num_embed = num_embed
        self.clip_name = clip_name
        self.normalize = normalize
        self.pick_last_embedding = pick_last_embedding
        self.keep_seq_len_dim = keep_seq_len_dim
        self.additional_last_embedding = additional_last_embedding

        model, _ = clip.load(clip_name, device='cpu', jit=False)
        model = clip_model.build_model(model.state_dict())

        self.token_embedding = model.token_embedding
        self.positional_embedding = model.positional_embedding
        self.transformer = model.transformer
        self.ln_final = model.ln_final
        self.text_projection = model.text_projection

        if embed_dim == 1024:
            self.embed_dim = self.text_projection.shape[1] * 2  # to fit the 1024-dim image embedding
        else:
            self.embed_dim = self.text_projection.shape[1]      # original output, 512 dim

        self.trainable = False
        self._set_trainable()

    @property
    def dtype(self):
        return self.transformer.resblocks[0].attn.in_proj_weight.dtype

    def encode_text(self, text):
        text[text < 0] = 0  # some padded text tokens may be negative, so set them to 0
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)  # x.shape = [batch_size, n_ctx, transformer.width]

        if self.pick_last_embedding:
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection  # [batch_size, transformer.width]
            if self.keep_seq_len_dim:
                x = x.unsqueeze(dim=1)  # [batch_size, 1, transformer.width]
        return x

    def forward(self, index, **kwargs):
        """
        index: B x L, index
        mask: B x L, bool type. False indicates a padded index
        """
        assert index.dim() == 2  # B x L
        text_feature = self.encode_text(index)

        if self.embed_dim == 1024:
            text_features = torch.cat((text_feature, text_feature), dim=2)
        else:
            text_features = text_feature
        if self.normalize:
            text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        if self.additional_last_embedding == True:
            last_feature = text_feature[torch.arange(text_feature.shape[0]), index.argmax(dim=-1)] @ self.text_projection
            if self.keep_seq_len_dim:
                last_feature = last_feature.unsqueeze(dim=1)
            return text_features, last_feature

        return text_features
file_length: 3,423 | avg_line_length: 37.47191 | max_line_length: 121 | extension_type: py
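The pooled text feature above is taken at the end-of-text position, found as the argmax over token ids, and projected with text_projection. A standalone sketch with dummy tensors; 49407 as the eot id follows CLIP's convention, but here it only needs to be the largest id in each row.

import torch

B, L, width, proj_dim = 2, 77, 512, 512
x = torch.randn(B, L, width)                 # per-token transformer features
text = torch.randint(0, 49407, (B, L))       # dummy token ids
text[0, 5] = 49407                           # place the eot token (highest id) somewhere
text[1, 9] = 49407
text_projection = torch.randn(width, proj_dim)

# pick the feature at the eot position (argmax over ids) and project it
pooled = x[torch.arange(B), text.argmax(dim=-1)] @ text_projection   # B x proj_dim
print(pooled.shape, text.argmax(dim=-1))     # torch.Size([2, 512]) tensor([5, 9])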
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/utils/misc.py
from numpy.core.fromnumeric import resize
from numpy.lib.function_base import kaiser
from numpy.lib.npyio import save
import torch
import random
import math

from image_synthesis.distributed.distributed import all_reduce, get_world_size


def logits_top_k(logits, filter_ratio=0.5, minimum=1, pad_value=None):
    logits = logits.contiguous()
    if filter_ratio < 0:
        filter_ratio = -filter_ratio
    if filter_ratio >= 0 and filter_ratio <= 1.0:
        num_logits = logits.shape[-1]
        k = max(int((1 - filter_ratio) * num_logits), minimum)
    else:
        k = max(int(filter_ratio), minimum)

    val, ind = torch.topk(input=logits, k=k, dim=-1)
    if pad_value is None:
        pad_value = float('-inf')
    probs = torch.full_like(logits, pad_value)
    # probs.scatter_(1, ind, val)
    probs.scatter_(-1, ind, val)
    return probs


def mask_with_top_k(x, k, largest=True, abs=True, pad_value=None):
    """
    Mask the input tensor along the last dimension.
    Values that are not in the top-k will be masked as zeros.
    """
    if abs:
        x_ = x.abs()
    else:
        x_ = x
    _, top_k_index = x_.topk(k=k, dim=-1, largest=largest)  # BHW x K

    mask = torch.zeros_like(x)
    ones = torch.ones_like(x)
    mask.scatter_(-1, index=top_k_index, src=ones)

    x = x * mask
    if pad_value is None or pad_value != 0:
        if pad_value is None:
            pad_value = float('-inf')
        x[mask == 0] = x[mask == 0] + pad_value
    return x


def sample_index_randomly(x, k, filter_ratio=0, largest=True):
    """
    x: should be a 2D tensor, randomly sample along the last dimension
    """
    assert x.dim() == 2, 'currently only two dimensional tensors are supported!'

    if filter_ratio < 0:
        filter_ratio = -filter_ratio
    if filter_ratio >= 0 and filter_ratio <= 1.0:
        num_logits = x.shape[-1]
        topk = max(int((1 - filter_ratio) * num_logits), k)
    else:
        topk = max(int(filter_ratio), k)

    _, top_k_index = x.topk(k=topk, dim=-1, largest=largest)  # BHW x K

    sampled = []
    for i in range(x.shape[0]):
        index = top_k_index[i]
        sampled_ = torch.tensor(random.sample(index.tolist(), k)).to(index)
        sampled.append(sampled_)
    sampled = torch.stack(sampled, dim=0).to(top_k_index)
    return sampled


def get_token_type(mask, token_shape):
    """
    Get the token type according to the given mask and token_shape.
    Note that we treat tokens as 3 types:
        0: masked tokens
        1: unmasked tokens
        2: partially masked tokens
    Args:
        mask: 4D tensor, B x 1 x H x W, the mask of the origin image. 1 denotes masked pixels
            and 0 denotes unmasked pixels.
        token_shape: [H/r, W/r], the shape of tokens
    """
    mask_float = mask.float()
    mask_unshuffle = pixel_unshuffle(mask_float, token_shape)  # B x r^2 x H/r x W/r

    scale_factor = mask_unshuffle.shape[1]
    mask_unshuffle = mask_unshuffle.sum(dim=1, keepdim=True)  # B x 1 x H/r x W/r

    token_type = torch.zeros_like(mask_unshuffle).long() + 2
    token_type[mask_unshuffle == 0] = 0  # unmasked tokens
    token_type[mask_unshuffle == scale_factor] = 1  # fully masked tokens
    return token_type


def gen_attention_mask(H, W, type='full', causal=True, condition_seq_len=0, **kwargs):
    content_seq_len = H * W
    seq_len = content_seq_len + condition_seq_len
    mask = torch.zeros(seq_len, seq_len)
    mask[:, :condition_seq_len] = 1

    if type == 'full':
        mask += 1
    elif type == 'dalle_row':
        for idx in range(content_seq_len):
            h = idx // W
            w = idx % W
            for w_ in range(w - W, w + 1):
                i = h * W + w_
                mask[idx + condition_seq_len][i + condition_seq_len] = 1
    elif type == 'dalle_col':
        for idx in range(content_seq_len):
            h = idx // W
            w = idx % W
            for h_ in range(h + 1):
                i = h_ * W + w
                mask[idx + condition_seq_len][i + condition_seq_len] = 1
    elif type == 'dalle_conv':
        kernel_size = kwargs['kernel_size']
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size, kernel_size]
        k_h, k_w = kernel_size[0], kernel_size[1]
        half_k_h = int(k_h / 2)
        half_k_w = int(k_w / 2)
        step_over_w = W - k_w

        for idx in range(content_seq_len):
            max_kernel_count = (half_k_h + 1) * k_w
            step_over_count = step_over_w * (half_k_h + 1)

            max_pre = max_kernel_count + step_over_count
            max_pre = min(idx + 1, max_pre)

            for i in range(max_pre):
                valid = False
                a = i % W
                if a > half_k_w and a <= half_k_w + step_over_w:
                    valid = False
                else:
                    valid = True
                if valid:
                    mask[idx + condition_seq_len][idx - i + condition_seq_len] = 1
    else:
        raise NotImplementedError('attention type {} not implemented!'.format(type))

    if causal:
        causal_mask = torch.tril(torch.ones(content_seq_len + condition_seq_len, content_seq_len + condition_seq_len))
        mask *= causal_mask
    return mask
file_length: 5,282 | avg_line_length: 32.01875 | max_line_length: 114 | extension_type: py
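A small usage sketch for gen_attention_mask defined above, assuming the module is importable under the repository layout. With type='dalle_col', each content position may attend to the positions above it in the same column, plus the condition prefix, subject to the causal mask.

import torch
# assumes the VQ-Diffusion repository is on PYTHONPATH
from image_synthesis.modeling.utils.misc import gen_attention_mask

m = gen_attention_mask(H=3, W=3, type='dalle_col', causal=True, condition_seq_len=2)
print(m.shape)     # torch.Size([11, 11]): 3*3 content tokens + 2 condition tokens
print(m[2:, 2:])   # the content-to-content block of the attention mask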
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/transformers/diffusion_transformer.py
# VQ-Diffusion import math import torch from torch import nn import torch.nn.functional as F from image_synthesis.utils.misc import instantiate_from_config import numpy as np from einops import rearrange from image_synthesis.distributed.distributed import is_primary, get_rank from inspect import isfunction from torch.cuda.amp import autocast from image_synthesis.modeling.transformers.transformer_utils import Text2ImageTransformer eps = 1e-8 def sum_except_batch(x, num_dims=1): return x.reshape(*x.shape[:num_dims], -1).sum(-1) def log_1_min_a(a): return torch.log(1 - a.exp() + 1e-40) def log_add_exp(a, b): maximum = torch.max(a, b) return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum)) def extract(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) def log_categorical(log_x_start, log_prob): return (log_x_start.exp() * log_prob).sum(dim=1) def index_to_log_onehot(x, num_classes): assert x.max().item() < num_classes, \ f'Error: {x.max().item()} >= {num_classes}' x_onehot = F.one_hot(x, num_classes) permute_order = (0, -1) + tuple(range(1, len(x.size()))) x_onehot = x_onehot.permute(permute_order) log_x = torch.log(x_onehot.float().clamp(min=1e-30)) return log_x def log_onehot_to_index(log_x): return log_x.argmax(1) def alpha_schedule(time_step, N=100, att_1 = 0.99999, att_T = 0.000009, ctt_1 = 0.000009, ctt_T = 0.99999): att = np.arange(0, time_step)/(time_step-1)*(att_T - att_1) + att_1 att = np.concatenate(([1], att)) at = att[1:]/att[:-1] ctt = np.arange(0, time_step)/(time_step-1)*(ctt_T - ctt_1) + ctt_1 ctt = np.concatenate(([0], ctt)) one_minus_ctt = 1 - ctt one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] ct = 1-one_minus_ct bt = (1-at-ct)/N att = np.concatenate((att[1:], [1])) ctt = np.concatenate((ctt[1:], [0])) btt = (1-att-ctt)/N return at, bt, ct, att, btt, ctt class DiffusionTransformer(nn.Module): def __init__( self, *, content_emb_config=None, condition_emb_config=None, transformer_config=None, diffusion_step=100, alpha_init_type='cos', auxiliary_loss_weight=0, adaptive_auxiliary_loss=False, mask_weight=[1,1], learnable_cf=False, ): super().__init__() if condition_emb_config is None: self.condition_emb = None else: # for condition and config, we learn a seperate embedding self.condition_emb = instantiate_from_config(condition_emb_config) self.condition_dim = self.condition_emb.embed_dim transformer_config['params']['diffusion_step'] = diffusion_step transformer_config['params']['content_emb_config'] = content_emb_config self.transformer = instantiate_from_config(transformer_config) self.content_seq_len = transformer_config['params']['content_seq_len'] self.amp = False self.num_classes = self.transformer.content_emb.num_embed self.loss_type = 'vb_stochastic' self.shape = transformer_config['params']['content_seq_len'] self.num_timesteps = diffusion_step self.parametrization = 'x0' self.auxiliary_loss_weight = auxiliary_loss_weight self.adaptive_auxiliary_loss = adaptive_auxiliary_loss self.mask_weight = mask_weight if alpha_init_type == "alpha1": at, bt, ct, att, btt, ctt = alpha_schedule(self.num_timesteps, N=self.num_classes-1) else: print("alpha_init_type is Wrong !! 
") at = torch.tensor(at.astype('float64')) bt = torch.tensor(bt.astype('float64')) ct = torch.tensor(ct.astype('float64')) log_at = torch.log(at) log_bt = torch.log(bt) log_ct = torch.log(ct) att = torch.tensor(att.astype('float64')) btt = torch.tensor(btt.astype('float64')) ctt = torch.tensor(ctt.astype('float64')) log_cumprod_at = torch.log(att) log_cumprod_bt = torch.log(btt) log_cumprod_ct = torch.log(ctt) log_1_min_ct = log_1_min_a(log_ct) log_1_min_cumprod_ct = log_1_min_a(log_cumprod_ct) assert log_add_exp(log_ct, log_1_min_ct).abs().sum().item() < 1.e-5 assert log_add_exp(log_cumprod_ct, log_1_min_cumprod_ct).abs().sum().item() < 1.e-5 self.diffusion_acc_list = [0] * self.num_timesteps self.diffusion_keep_list = [0] * self.num_timesteps # Convert to float32 and register buffers. self.register_buffer('log_at', log_at.float()) self.register_buffer('log_bt', log_bt.float()) self.register_buffer('log_ct', log_ct.float()) self.register_buffer('log_cumprod_at', log_cumprod_at.float()) self.register_buffer('log_cumprod_bt', log_cumprod_bt.float()) self.register_buffer('log_cumprod_ct', log_cumprod_ct.float()) self.register_buffer('log_1_min_ct', log_1_min_ct.float()) self.register_buffer('log_1_min_cumprod_ct', log_1_min_cumprod_ct.float()) self.register_buffer('Lt_history', torch.zeros(self.num_timesteps)) self.register_buffer('Lt_count', torch.zeros(self.num_timesteps)) self.zero_vector = None if learnable_cf: self.empty_text_embed = torch.nn.Parameter(torch.randn(size=(77, 512), requires_grad=True, dtype=torch.float64)) self.prior_rule = 0 # inference rule: 0 for VQ-Diffusion v1, 1 for only high-quality inference, 2 for purity prior self.prior_ps = 1024 # max number to sample per step self.prior_weight = 0 # probability adjust parameter, 'r' in Equation.11 of Improved VQ-Diffusion self.update_n_sample() self.learnable_cf = learnable_cf def update_n_sample(self): if self.num_timesteps == 100: if self.prior_ps <= 10: self.n_sample = [1, 6] + [11, 10, 10] * 32 + [11, 15] else: self.n_sample = [1, 10] + [11, 10, 10] * 32 + [11, 11] elif self.num_timesteps == 50: self.n_sample = [10] + [21, 20] * 24 + [30] elif self.num_timesteps == 25: self.n_sample = [21] + [41] * 23 + [60] elif self.num_timesteps == 10: self.n_sample = [69] + [102] * 8 + [139] def multinomial_kl(self, log_prob1, log_prob2): # compute KL loss on log_prob kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1) return kl def q_pred_one_timestep(self, log_x_t, t): # q(xt|xt_1) log_at = extract(self.log_at, t, log_x_t.shape) # at log_bt = extract(self.log_bt, t, log_x_t.shape) # bt log_ct = extract(self.log_ct, t, log_x_t.shape) # ct log_1_min_ct = extract(self.log_1_min_ct, t, log_x_t.shape) # 1-ct log_probs = torch.cat( [ log_add_exp(log_x_t[:,:-1,:]+log_at, log_bt), log_add_exp(log_x_t[:, -1:, :] + log_1_min_ct, log_ct) ], dim=1 ) return log_probs def q_pred(self, log_x_start, t): # q(xt|x0) # log_x_start can be onehot or not t = (t + (self.num_timesteps + 1))%(self.num_timesteps + 1) log_cumprod_at = extract(self.log_cumprod_at, t, log_x_start.shape) # at~ log_cumprod_bt = extract(self.log_cumprod_bt, t, log_x_start.shape) # bt~ log_cumprod_ct = extract(self.log_cumprod_ct, t, log_x_start.shape) # ct~ log_1_min_cumprod_ct = extract(self.log_1_min_cumprod_ct, t, log_x_start.shape) # 1-ct~ log_probs = torch.cat( [ log_add_exp(log_x_start[:,:-1,:]+log_cumprod_at, log_cumprod_bt), log_add_exp(log_x_start[:,-1:,:]+log_1_min_cumprod_ct, log_cumprod_ct) ], dim=1 ) return log_probs def predict_start(self, log_x_t, cond_emb, 
t): # p(x0|xt) x_t = log_onehot_to_index(log_x_t) if self.amp == True: with autocast(): out = self.transformer(x_t, cond_emb, t) else: out = self.transformer(x_t, cond_emb, t) assert out.size(0) == x_t.size(0) assert out.size(1) == self.num_classes-1 assert out.size()[2:] == x_t.size()[1:] log_pred = F.log_softmax(out.double(), dim=1).float() batch_size = log_x_t.size()[0] if self.zero_vector is None or self.zero_vector.shape[0] != batch_size: self.zero_vector = torch.zeros(batch_size, 1, self.content_seq_len).type_as(log_x_t)- 70 log_pred = torch.cat((log_pred, self.zero_vector), dim=1) log_pred = torch.clamp(log_pred, -70, 0) return log_pred def cf_predict_start(self, log_x_t, cond_emb, t): return self.predict_start(log_x_t, cond_emb, t) def q_posterior(self, log_x_start, log_x_t, t): # p_theta(xt_1|xt) = sum(q(xt-1|xt,x0')*p(x0')) # notice that log_x_t is onehot assert t.min().item() >= 0 and t.max().item() < self.num_timesteps batch_size = log_x_start.size()[0] onehot_x_t = log_onehot_to_index(log_x_t) mask = (onehot_x_t == self.num_classes-1).unsqueeze(1) log_one_vector = torch.zeros(batch_size, 1, 1).type_as(log_x_t) log_zero_vector = torch.log(log_one_vector+1.0e-30).expand(-1, -1, self.content_seq_len) log_qt = self.q_pred(log_x_t, t) # q(xt|x0) # log_qt = torch.cat((log_qt[:,:-1,:], log_zero_vector), dim=1) log_qt = log_qt[:,:-1,:] log_cumprod_ct = extract(self.log_cumprod_ct, t, log_x_start.shape) # ct~ ct_cumprod_vector = log_cumprod_ct.expand(-1, self.num_classes-1, -1) # ct_cumprod_vector = torch.cat((ct_cumprod_vector, log_one_vector), dim=1) log_qt = (~mask)*log_qt + mask*ct_cumprod_vector log_qt_one_timestep = self.q_pred_one_timestep(log_x_t, t) # q(xt|xt_1) log_qt_one_timestep = torch.cat((log_qt_one_timestep[:,:-1,:], log_zero_vector), dim=1) log_ct = extract(self.log_ct, t, log_x_start.shape) # ct ct_vector = log_ct.expand(-1, self.num_classes-1, -1) ct_vector = torch.cat((ct_vector, log_one_vector), dim=1) log_qt_one_timestep = (~mask)*log_qt_one_timestep + mask*ct_vector # log_x_start = torch.cat((log_x_start, log_zero_vector), dim=1) # q = log_x_start - log_qt q = log_x_start[:,:-1,:] - log_qt q = torch.cat((q, log_zero_vector), dim=1) q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True) q = q - q_log_sum_exp log_EV_xtmin_given_xt_given_xstart = self.q_pred(q, t-1) + log_qt_one_timestep + q_log_sum_exp return torch.clamp(log_EV_xtmin_given_xt_given_xstart, -70, 0) def p_pred(self, log_x, cond_emb, t): # if x0, first p(x0|xt), than sum(q(xt-1|xt,x0)*p(x0|xt)) if self.parametrization == 'x0': log_x_recon = self.cf_predict_start(log_x, cond_emb, t) log_model_pred = self.q_posterior( log_x_start=log_x_recon, log_x_t=log_x, t=t) elif self.parametrization == 'direct': log_model_pred = self.predict_start(log_x, cond_emb, t) else: raise ValueError return log_model_pred, log_x_recon @torch.no_grad() def p_sample(self, log_x, cond_emb, t, sampled=None, to_sample=None): # sample q(xt-1) for next step from xt, actually is p(xt-1|xt) model_log_prob, log_x_recon = self.p_pred(log_x, cond_emb, t) max_sample_per_step = self.prior_ps # max number to sample per step if t[0] > 0 and self.prior_rule > 0 and to_sample is not None: # prior_rule: 0 for VQ-Diffusion v1, 1 for only high-quality inference, 2 for purity prior log_x_idx = log_onehot_to_index(log_x) if self.prior_rule == 1: score = torch.ones((log_x.shape[0], log_x.shape[2])).to(log_x.device) elif self.prior_rule == 2: score = torch.exp(log_x_recon).max(dim=1).values.clamp(0, 1) score /= (score.max(dim=1, keepdim=True).values 
+ 1e-10) if self.prior_rule != 1 and self.prior_weight > 0: # probability adjust parameter, prior_weight: 'r' in Equation.11 of Improved VQ-Diffusion prob = ((1 + score * self.prior_weight).unsqueeze(1) * log_x_recon).softmax(dim=1) prob = prob.log().clamp(-70, 0) else: prob = log_x_recon out = self.log_sample_categorical(prob) out_idx = log_onehot_to_index(out) out2_idx = log_x_idx.clone() _score = score.clone() if _score.sum() < 1e-6: _score += 1 _score[log_x_idx != self.num_classes - 1] = 0 for i in range(log_x.shape[0]): n_sample = min(to_sample - sampled[i], max_sample_per_step) if to_sample - sampled[i] - n_sample == 1: n_sample = to_sample - sampled[i] if n_sample <= 0: continue sel = torch.multinomial(_score[i], n_sample) out2_idx[i][sel] = out_idx[i][sel] sampled[i] += ((out2_idx[i] != self.num_classes - 1).sum() - (log_x_idx[i] != self.num_classes - 1).sum()).item() out = index_to_log_onehot(out2_idx, self.num_classes) else: # Gumbel sample out = self.log_sample_categorical(model_log_prob) sampled = [1024] * log_x.shape[0] if to_sample is not None: return out, sampled else: return out def log_sample_categorical(self, logits): # use gumbel to sample onehot vector from log probability uniform = torch.rand_like(logits) gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) sample = (gumbel_noise + logits).argmax(dim=1) log_sample = index_to_log_onehot(sample, self.num_classes) return log_sample def q_sample(self, log_x_start, t): # diffusion step, q(xt|x0) and sample xt log_EV_qxt_x0 = self.q_pred(log_x_start, t) log_sample = self.log_sample_categorical(log_EV_qxt_x0) return log_sample def sample_time(self, b, device, method='uniform'): if method == 'importance': if not (self.Lt_count > 10).all(): return self.sample_time(b, device, method='uniform') Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001 Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1. 
pt_all = Lt_sqrt / Lt_sqrt.sum() t = torch.multinomial(pt_all, num_samples=b, replacement=True) pt = pt_all.gather(dim=0, index=t) return t, pt elif method == 'uniform': t = torch.randint(0, self.num_timesteps, (b,), device=device).long() pt = torch.ones_like(t).float() / self.num_timesteps return t, pt else: raise ValueError def _train_loss(self, x, cond_emb, is_train=True): # get the KL loss b, device = x.size(0), x.device assert self.loss_type == 'vb_stochastic' x_start = x t, pt = self.sample_time(b, device, 'importance') log_x_start = index_to_log_onehot(x_start, self.num_classes) log_xt = self.q_sample(log_x_start=log_x_start, t=t) xt = log_onehot_to_index(log_xt) log_x0_recon = self.predict_start(log_xt, cond_emb, t=t) # P_theta(x0|xt) log_model_prob = self.q_posterior(log_x_start=log_x0_recon, log_x_t=log_xt, t=t) # go through q(xt_1|xt,x0) x0_recon = log_onehot_to_index(log_x0_recon) x0_real = x_start xt_1_recon = log_onehot_to_index(log_model_prob) xt_recon = log_onehot_to_index(log_xt) for index in range(t.size()[0]): this_t = t[index].item() same_rate = (x0_recon[index] == x0_real[index]).sum().cpu()/x0_real.size()[1] self.diffusion_acc_list[this_t] = same_rate.item()*0.1 + self.diffusion_acc_list[this_t]*0.9 same_rate = (xt_1_recon[index] == xt_recon[index]).sum().cpu()/xt_recon.size()[1] self.diffusion_keep_list[this_t] = same_rate.item()*0.1 + self.diffusion_keep_list[this_t]*0.9 # compute log_true_prob now log_true_prob = self.q_posterior(log_x_start=log_x_start, log_x_t=log_xt, t=t) kl = self.multinomial_kl(log_true_prob, log_model_prob) mask_region = (xt == self.num_classes-1).float() mask_weight = mask_region * self.mask_weight[0] + (1. - mask_region) * self.mask_weight[1] kl = kl * mask_weight kl = sum_except_batch(kl) decoder_nll = -log_categorical(log_x_start, log_model_prob) decoder_nll = sum_except_batch(decoder_nll) mask = (t == torch.zeros_like(t)).float() kl_loss = mask * decoder_nll + (1. - mask) * kl Lt2 = kl_loss.pow(2) Lt2_prev = self.Lt_history.gather(dim=0, index=t) new_Lt_history = (0.1 * Lt2 + 0.9 * Lt2_prev).detach() self.Lt_history.scatter_(dim=0, index=t, src=new_Lt_history) self.Lt_count.scatter_add_(dim=0, index=t, src=torch.ones_like(Lt2)) # Upweigh loss term of the kl # vb_loss = kl_loss / pt + kl_prior loss1 = kl_loss / pt vb_loss = loss1 if self.auxiliary_loss_weight != 0 and is_train==True: kl_aux = self.multinomial_kl(log_x_start[:,:-1,:], log_x0_recon[:,:-1,:]) kl_aux = kl_aux * mask_weight kl_aux = sum_except_batch(kl_aux) kl_aux_loss = mask * decoder_nll + (1. - mask) * kl_aux if self.adaptive_auxiliary_loss == True: addition_loss_weight = (1-t/self.num_timesteps) + 1.0 else: addition_loss_weight = 1.0 loss2 = addition_loss_weight * self.auxiliary_loss_weight * kl_aux_loss / pt vb_loss += loss2 return log_model_prob, vb_loss @property def device(self): return self.transformer.to_logits[-1].weight.device def parameters(self, recurse=True, name=None): """ Following minGPT: This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. 
""" # return super().parameters(recurse=True) if name is None or name == 'none': return super().parameters(recurse=recurse) else: # separate out all parameters to those that will and won't experience regularizing weight decay print("GPTLikeTransformer: get parameters by the overwrite method!") decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # special case the position embedding parameter as not decayed module_name = ['condition_emb', 'content_emb'] pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb'] for mn in module_name: if hasattr(self, mn) and getattr(self, mn) is not None: for pn in pos_emb_name: if hasattr(getattr(self, mn), pn): if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter): no_decay.add('{}.{}'.format(mn, pn)) # validate that we considered every parameter param_dict = {pn: p for pn, p in self.transformer.named_parameters()}# if p.requires_grad} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" 
\ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] return optim_groups def forward( self, input, return_loss=False, return_logits=True, return_att_weight=False, is_train=True, **kwargs): if kwargs.get('autocast') == True: self.amp = True batch_size = input['content_token'].shape[0] device = input['content_token'].device # 1) get embeddding for condition and content prepare input sample_image = input['content_token'].type_as(input['content_token']) # cont_emb = self.content_emb(sample_image) if self.condition_emb is not None: with autocast(enabled=False): with torch.no_grad(): cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024 if self.learnable_cf: is_empty_text = torch.logical_not(input['condition_mask'][:, 2]).unsqueeze(1).unsqueeze(2).repeat(1, 77, 512) cond_emb = torch.where(is_empty_text, self.empty_text_embed.unsqueeze(0).repeat(cond_emb.shape[0], 1, 1), cond_emb.type_as(self.empty_text_embed)) cond_emb = cond_emb.float() else: # share condition embeding with content if input.get('condition_embed_token') == None: cond_emb = None else: cond_emb = input['condition_embed_token'].float() # now we get cond_emb and sample_image if is_train == True: log_model_prob, loss = self._train_loss(sample_image, cond_emb) loss = loss.sum()/(sample_image.size()[0] * sample_image.size()[1]) # 4) get output, especially loss out = {} if return_logits: out['logits'] = torch.exp(log_model_prob) if return_loss: out['loss'] = loss self.amp = False return out def sample( self, condition_token, condition_mask, condition_embed, content_token = None, filter_ratio = 0.5, temperature = 1.0, return_att_weight = False, return_logits = False, content_logits = None, print_log = True, **kwargs): input = {'condition_token': condition_token, 'content_token': content_token, 'condition_mask': condition_mask, 'condition_embed_token': condition_embed, 'content_logits': content_logits, } if input['condition_token'] != None: batch_size = input['condition_token'].shape[0] else: batch_size = kwargs['batch_size'] device = self.log_at.device start_step = int(self.num_timesteps * filter_ratio) # get cont_emb and cond_emb if content_token != None: sample_image = input['content_token'].type_as(input['content_token']) if self.condition_emb is not None: # do this with torch.no_grad(): cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024 cond_emb = cond_emb.float() else: # share condition embeding with content if input.get('condition_embed_token', None) != None: cond_emb = input['condition_embed_token'].float() else: cond_emb = None if start_step == 0: # use full mask sample zero_logits = torch.zeros((batch_size, self.num_classes-1, self.shape),device=device) one_logits = torch.ones((batch_size, 1, self.shape),device=device) mask_logits = torch.cat((zero_logits, one_logits), dim=1) log_z = torch.log(mask_logits) start_step = self.num_timesteps with torch.no_grad(): for diffusion_index in range(start_step-1, -1, -1): t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long) sampled = [0] * log_z.shape[0] while min(sampled) < self.n_sample[diffusion_index]: log_z, sampled = self.p_sample(log_z, cond_emb, t, sampled, self.n_sample[diffusion_index]) # log_z is log_onehot else: t = torch.full((batch_size,), start_step-1, device=device, dtype=torch.long) 
log_x_start = index_to_log_onehot(sample_image, self.num_classes) log_xt = self.q_sample(log_x_start=log_x_start, t=t) log_z = log_xt with torch.no_grad(): for diffusion_index in range(start_step-1, -1, -1): t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long) log_z = self.p_sample(log_z, cond_emb, t) # log_z is log_onehot content_token = log_onehot_to_index(log_z) output = {'content_token': content_token} if return_logits: output['logits'] = torch.exp(log_z) return output def sample_fast( self, condition_token, condition_mask, condition_embed, content_token = None, filter_ratio = 0.5, temperature = 1.0, return_att_weight = False, return_logits = False, content_logits = None, print_log = True, skip_step = 1, **kwargs): input = {'condition_token': condition_token, 'content_token': content_token, 'condition_mask': condition_mask, 'condition_embed_token': condition_embed, 'content_logits': content_logits, } batch_size = input['condition_token'].shape[0] device = self.log_at.device start_step = int(self.num_timesteps * filter_ratio) # get cont_emb and cond_emb if content_token != None: sample_image = input['content_token'].type_as(input['content_token']) if self.condition_emb is not None: with torch.no_grad(): cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024 cond_emb = cond_emb.float() else: # share condition embeding with content cond_emb = input['condition_embed_token'].float() assert start_step == 0 zero_logits = torch.zeros((batch_size, self.num_classes-1, self.shape),device=device) one_logits = torch.ones((batch_size, 1, self.shape),device=device) mask_logits = torch.cat((zero_logits, one_logits), dim=1) log_z = torch.log(mask_logits) start_step = self.num_timesteps with torch.no_grad(): # skip_step = 1 diffusion_list = [index for index in range(start_step-1, -1, -1-skip_step)] if diffusion_list[-1] != 0: diffusion_list.append(0) # for diffusion_index in range(start_step-1, -1, -1): for diffusion_index in diffusion_list: t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long) log_x_recon = self.cf_predict_start(log_z, cond_emb, t) if diffusion_index > skip_step: model_log_prob = self.q_posterior(log_x_start=log_x_recon, log_x_t=log_z, t=t-skip_step) else: model_log_prob = self.q_posterior(log_x_start=log_x_recon, log_x_t=log_z, t=t) log_z = self.log_sample_categorical(model_log_prob) content_token = log_onehot_to_index(log_z) output = {'content_token': content_token} if return_logits: output['logits'] = torch.exp(log_z) return output
29,919
42.678832
166
py
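Usage sketch (not part of the dumped source): the skip-step schedule that sample_fast above uses to thin out the reverse-diffusion loop can be reproduced in isolation. num_timesteps and skip_step below are illustrative values, not the repo defaults.

# Illustrative stand-in for the timestep list built inside sample_fast.
num_timesteps = 100     # assumed total diffusion steps
skip_step = 9           # visit every (skip_step + 1)-th timestep

diffusion_list = [t for t in range(num_timesteps - 1, -1, -1 - skip_step)]
if diffusion_list[-1] != 0:
    diffusion_list.append(0)    # always finish the reverse process at t = 0

print(diffusion_list)           # [99, 89, ..., 9, 0] -> 11 denoising iterations instead of 100

Each visited index predicts the start logits once and then jumps the posterior by skip_step (or to t itself near the end), which is why the loop in sample_fast calls q_posterior only once per listed timestep.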
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/modeling/transformers/transformer_utils.py
# VQ-Diffusion import math import torch from torch import nn import torch.nn.functional as F from image_synthesis.utils.misc import instantiate_from_config import numpy as np from einops import rearrange from image_synthesis.distributed.distributed import is_primary, get_rank from inspect import isfunction from torch.cuda.amp import autocast from torch.utils.checkpoint import checkpoint class FullAttention(nn.Module): def __init__(self, n_embd, # the embed dim n_head, # the number of heads seq_len=None, # the max length of sequence attn_pdrop=0.1, # attention dropout prob resid_pdrop=0.1, # residual attention dropout prob causal=True, ): super().__init__() assert n_embd % n_head == 0 # key, query, value projections for all heads self.key = nn.Linear(n_embd, n_embd) self.query = nn.Linear(n_embd, n_embd) self.value = nn.Linear(n_embd, n_embd) # regularization self.attn_drop = nn.Dropout(attn_pdrop) self.resid_drop = nn.Dropout(resid_pdrop) # output projection self.proj = nn.Linear(n_embd, n_embd) self.n_head = n_head self.causal = causal def forward(self, x, encoder_output, mask=None): B, T, C = x.size() k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) # (B, nh, T, T) att = F.softmax(att, dim=-1) # (B, nh, T, T) att = self.attn_drop(att) y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side, (B, T, C) att = att.mean(dim=1, keepdim=False) # (B, T, T) # output projection y = self.resid_drop(self.proj(y)) return y, att class CrossAttention(nn.Module): def __init__(self, condition_seq_len, n_embd, # the embed dim condition_embd, # condition dim n_head, # the number of heads seq_len=None, # the max length of sequence attn_pdrop=0.1, # attention dropout prob resid_pdrop=0.1, # residual attention dropout prob causal=True, ): super().__init__() assert n_embd % n_head == 0 # key, query, value projections for all heads self.key = nn.Linear(condition_embd, n_embd) self.query = nn.Linear(n_embd, n_embd) self.value = nn.Linear(condition_embd, n_embd) # regularization self.attn_drop = nn.Dropout(attn_pdrop) self.resid_drop = nn.Dropout(resid_pdrop) # output projection self.proj = nn.Linear(n_embd, n_embd) self.n_head = n_head self.causal = causal # causal mask to ensure that attention is only applied to the left in the input sequence if self.causal: self.register_buffer("mask", torch.tril(torch.ones(seq_len, seq_len)) .view(1, 1, seq_len, seq_len)) def forward(self, x, encoder_output, mask=None): B, T, C = x.size() B, T_E, _ = encoder_output.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim k = self.key(encoder_output).view(B, T_E, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = self.value(encoder_output).view(B, T_E, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) # (B, nh, T, T) att = F.softmax(att, dim=-1) # (B, nh, T, T) att = self.attn_drop(att) y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble 
all head outputs side by side, (B, T, C) att = att.mean(dim=1, keepdim=False) # (B, T, T) # output projection y = self.resid_drop(self.proj(y)) return y, att class GELU2(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x * F.sigmoid(1.702 * x) class SinusoidalPosEmb(nn.Module): def __init__(self, num_steps, dim, rescale_steps=4000): super().__init__() self.dim = dim self.num_steps = float(num_steps) self.rescale_steps = float(rescale_steps) def forward(self, x): x = x / self.num_steps * self.rescale_steps device = x.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device) * -emb) emb = x[:, None] * emb[None, :] emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb class AdaLayerNorm(nn.Module): def __init__(self, n_embd, diffusion_step, emb_type="adalayernorm_abs"): super().__init__() if "abs" in emb_type: self.emb = SinusoidalPosEmb(diffusion_step, n_embd) else: self.emb = nn.Embedding(diffusion_step, n_embd) self.silu = nn.SiLU() self.linear = nn.Linear(n_embd, n_embd*2) self.layernorm = nn.LayerNorm(n_embd, elementwise_affine=False) self.diff_step = diffusion_step def forward(self, x, timestep): if timestep[0] >= self.diff_step: _emb = self.emb.weight.mean(dim=0, keepdim=True).repeat(len(timestep), 1) emb = self.linear(self.silu(_emb)).unsqueeze(1) else: emb = self.linear(self.silu(self.emb(timestep))).unsqueeze(1) scale, shift = torch.chunk(emb, 2, dim=2) x = self.layernorm(x) * (1 + scale) + shift return x class AdaInsNorm(nn.Module): def __init__(self, n_embd, diffusion_step, emb_type="adainsnorm_abs"): super().__init__() if "abs" in emb_type: self.emb = SinusoidalPosEmb(diffusion_step, n_embd) else: self.emb = nn.Embedding(diffusion_step, n_embd) self.silu = nn.SiLU() self.linear = nn.Linear(n_embd, n_embd*2) self.instancenorm = nn.InstanceNorm1d(n_embd) def forward(self, x, timestep): emb = self.linear(self.silu(self.emb(timestep))).unsqueeze(1) scale, shift = torch.chunk(emb, 2, dim=2) x = self.instancenorm(x.transpose(-1, -2)).transpose(-1,-2) * (1 + scale) + shift return x class Block(nn.Module): """ an unassuming Transformer block """ def __init__(self, class_type='adalayernorm', class_number=1000, condition_seq_len=77, n_embd=1024, n_head=16, seq_len=256, attn_pdrop=0.1, resid_pdrop=0.1, mlp_hidden_times=4, activate='GELU', attn_type='full', if_upsample=False, upsample_type='bilinear', upsample_pre_channel=0, content_spatial_size=None, # H , W conv_attn_kernel_size=None, # only need for dalle_conv attention condition_dim=1024, diffusion_step=100, timestep_type='adalayernorm', window_size = 8, mlp_type = 'fc', ): super().__init__() self.if_upsample = if_upsample self.attn_type = attn_type if attn_type in ['selfcross', 'selfcondition', 'self']: if 'adalayernorm' in timestep_type: self.ln1 = AdaLayerNorm(n_embd, diffusion_step, timestep_type) else: print("timestep_type wrong") else: self.ln1 = nn.LayerNorm(n_embd) self.ln2 = nn.LayerNorm(n_embd) # self.if_selfcross = False if attn_type in ['self', 'selfcondition']: self.attn = FullAttention( n_embd=n_embd, n_head=n_head, seq_len=seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, ) if attn_type == 'selfcondition': if 'adalayernorm' in class_type: self.ln2 = AdaLayerNorm(n_embd, class_number, class_type) else: self.ln2 = AdaInsNorm(n_embd, class_number, class_type) elif attn_type == 'selfcross': self.attn1 = FullAttention( n_embd=n_embd, n_head=n_head, seq_len=seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, ) 
self.attn2 = CrossAttention( condition_seq_len, n_embd=n_embd, condition_embd=condition_dim, n_head=n_head, seq_len=seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, ) if 'adalayernorm' in timestep_type: self.ln1_1 = AdaLayerNorm(n_embd, diffusion_step, timestep_type) else: print("timestep_type wrong") else: print("attn_type error") assert activate in ['GELU', 'GELU2'] act = nn.GELU() if activate == 'GELU' else GELU2() if mlp_type == 'conv_mlp': self.mlp = Conv_MLP(n_embd, mlp_hidden_times, act, resid_pdrop) else: self.mlp = nn.Sequential( nn.Linear(n_embd, mlp_hidden_times * n_embd), act, nn.Linear(mlp_hidden_times * n_embd, n_embd), nn.Dropout(resid_pdrop), ) def forward(self, x, encoder_output, timestep, mask=None): if self.attn_type == "selfcross": a, att = self.attn1(self.ln1(x, timestep), encoder_output, mask=mask) x = x + a a, att = self.attn2(self.ln1_1(x, timestep), encoder_output, mask=mask) x = x + a elif self.attn_type == "selfcondition": a, att = self.attn(self.ln1(x, timestep), encoder_output, mask=mask) x = x + a x = x + self.mlp(self.ln2(x, encoder_output.long())) # only one really use encoder_output return x, att else: # 'self' a, att = self.attn(self.ln1(x, timestep), encoder_output, mask=mask) x = x + a x = x + self.mlp(self.ln2(x)) return x, att class Conv_MLP(nn.Module): def __init__(self, n_embd, mlp_hidden_times, act, resid_pdrop): super().__init__() self.conv1 = nn.Conv2d(in_channels=n_embd, out_channels=int(mlp_hidden_times * n_embd), kernel_size=3, stride=1, padding=1) self.act = act self.conv2 = nn.Conv2d(in_channels=int(mlp_hidden_times * n_embd), out_channels=n_embd, kernel_size=3, stride=1, padding=1) self.dropout = nn.Dropout(resid_pdrop) def forward(self, x): n = x.size()[1] x = rearrange(x, 'b (h w) c -> b c h w', h=int(math.sqrt(n))) x = self.conv2(self.act(self.conv1(x))) x = rearrange(x, 'b c h w -> b (h w) c') return self.dropout(x) class Text2ImageTransformer(nn.Module): def __init__( self, condition_seq_len=77, n_layer=14, n_embd=1024, n_head=16, content_seq_len=1024, attn_pdrop=0, resid_pdrop=0, mlp_hidden_times=4, block_activate=None, attn_type='selfcross', content_spatial_size=[32,32], # H , W condition_dim=512, diffusion_step=1000, timestep_type='adalayernorm', content_emb_config=None, mlp_type='fc', checkpoint=False, ): super().__init__() self.use_checkpoint = checkpoint self.content_emb = instantiate_from_config(content_emb_config) # transformer assert attn_type == 'selfcross' all_attn_type = [attn_type] * n_layer if content_spatial_size is None: s = int(math.sqrt(content_seq_len)) assert s * s == content_seq_len content_spatial_size = (s, s) self.blocks = nn.Sequential(*[Block( condition_seq_len, n_embd=n_embd, n_head=n_head, seq_len=content_seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, mlp_hidden_times=mlp_hidden_times, activate=block_activate, attn_type=all_attn_type[n], content_spatial_size=content_spatial_size, # H , W condition_dim = condition_dim, diffusion_step = diffusion_step, timestep_type = timestep_type, mlp_type = mlp_type, ) for n in range(n_layer)]) # final prediction head out_cls = self.content_emb.num_embed-1 self.to_logits = nn.Sequential( nn.LayerNorm(n_embd), nn.Linear(n_embd, out_cls), ) self.condition_seq_len = condition_seq_len self.content_seq_len = content_seq_len self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: 
module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): if module.elementwise_affine == True: module.bias.data.zero_() module.weight.data.fill_(1.0) def parameters(self, recurse=True, name=None): """ Following minGPT: This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. """ # return super().parameters(recurse=True) if name is None or name == 'none': return super().parameters(recurse=recurse) else: # separate out all parameters to those that will and won't experience regularizing weight decay print("GPTLikeTransformer: get parameters by the overwrite method!") decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # special case the position embedding parameter as not decayed module_name = ['condition_emb', 'content_emb'] pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb'] for mn in module_name: if hasattr(self, mn) and getattr(self, mn) is not None: for pn in pos_emb_name: if hasattr(getattr(self, mn), pn): if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter): no_decay.add('{}.{}'.format(mn, pn)) # validate that we considered every parameter param_dict = {pn: p for pn, p in self.transformer.named_parameters()}# if p.requires_grad} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" 
\ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] return optim_groups def forward( self, input, cond_emb, t): cont_emb = self.content_emb(input) emb = cont_emb for block_idx in range(len(self.blocks)): if self.use_checkpoint == False: emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt) else: emb, att_weight = checkpoint(self.blocks[block_idx], emb, cond_emb, t.cuda()) logits = self.to_logits(emb) # B x (Ld+Lt) x n out = rearrange(logits, 'b l c -> b c l') return out class Condition2ImageTransformer(nn.Module): def __init__( self, class_type='adalayernorm', class_number=1000, n_layer=24, n_embd=1024, n_head=16, content_seq_len=1024, attn_pdrop=0, resid_pdrop=0, mlp_hidden_times=4, block_activate=None, attn_type='selfcondition', content_spatial_size=[32,32], # H , W diffusion_step=100, timestep_type='adalayernorm', content_emb_config=None, mlp_type="conv_mlp", ): super().__init__() self.content_emb = instantiate_from_config(content_emb_config) # transformer assert attn_type == 'selfcondition' all_attn_type = [attn_type] * n_layer if content_spatial_size is None: s = int(math.sqrt(content_seq_len)) assert s * s == content_seq_len content_spatial_size = (s, s) self.blocks = nn.Sequential(*[Block( class_type=class_type, class_number=class_number, n_embd=n_embd, n_head=n_head, seq_len=content_seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, mlp_hidden_times=mlp_hidden_times, activate=block_activate, attn_type=all_attn_type[n], content_spatial_size=content_spatial_size, # H , W diffusion_step = diffusion_step, timestep_type = timestep_type, mlp_type = mlp_type, ) for n in range(n_layer)]) # final prediction head out_cls = self.content_emb.num_embed-1 self.to_logits = nn.Sequential( nn.LayerNorm(n_embd), nn.Linear(n_embd, out_cls), ) self.content_seq_len = content_seq_len self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): if module.elementwise_affine == True: module.bias.data.zero_() module.weight.data.fill_(1.0) def parameters(self, recurse=True, name=None): """ Following minGPT: This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. 
""" # return super().parameters(recurse=True) if name is None or name == 'none': return super().parameters(recurse=recurse) else: # separate out all parameters to those that will and won't experience regularizing weight decay print("GPTLikeTransformer: get parameters by the overwrite method!") decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # special case the position embedding parameter as not decayed module_name = ['condition_emb', 'content_emb'] pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb'] for mn in module_name: if hasattr(self, mn) and getattr(self, mn) is not None: for pn in pos_emb_name: if hasattr(getattr(self, mn), pn): if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter): no_decay.add('{}.{}'.format(mn, pn)) # validate that we considered every parameter param_dict = {pn: p for pn, p in self.transformer.named_parameters()}# if p.requires_grad} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" 
\ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] return optim_groups def forward( self, input, cond_emb, t): cont_emb = self.content_emb(input) emb = cont_emb for block_idx in range(len(self.blocks)): emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt) logits = self.to_logits(emb) # B x (Ld+Lt) x n out = rearrange(logits, 'b l c -> b c l') return out class UnCondition2ImageTransformer(nn.Module): def __init__( self, class_type='adalayernorm', n_layer=24, n_embd=512, n_head=16, content_seq_len=256, attn_pdrop=0, resid_pdrop=0, mlp_hidden_times=4, block_activate=None, attn_type='self', content_spatial_size=[16,16], # H , W diffusion_step=100, timestep_type='adalayernorm', content_emb_config=None, mlp_type="conv_mlp", ): super().__init__() self.content_emb = instantiate_from_config(content_emb_config) # transformer assert attn_type == 'self' all_attn_type = [attn_type] * n_layer if content_spatial_size is None: s = int(math.sqrt(content_seq_len)) assert s * s == content_seq_len content_spatial_size = (s, s) self.blocks = nn.Sequential(*[Block( n_embd=n_embd, n_head=n_head, seq_len=content_seq_len, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, mlp_hidden_times=mlp_hidden_times, activate=block_activate, attn_type=all_attn_type[n], content_spatial_size=content_spatial_size, # H , W diffusion_step = diffusion_step, timestep_type = timestep_type, mlp_type = mlp_type, ) for n in range(n_layer)]) # final prediction head out_cls = self.content_emb.num_embed-1 self.to_logits = nn.Sequential( nn.LayerNorm(n_embd), nn.Linear(n_embd, out_cls), ) self.content_seq_len = content_seq_len self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): if module.elementwise_affine == True: module.bias.data.zero_() module.weight.data.fill_(1.0) def parameters(self, recurse=True, name=None): """ Following minGPT: This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. 
""" # return super().parameters(recurse=True) if name is None or name == 'none': return super().parameters(recurse=recurse) else: # separate out all parameters to those that will and won't experience regularizing weight decay print("GPTLikeTransformer: get parameters by the overwrite method!") decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # special case the position embedding parameter as not decayed module_name = ['condition_emb', 'content_emb'] pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb'] for mn in module_name: if hasattr(self, mn) and getattr(self, mn) is not None: for pn in pos_emb_name: if hasattr(getattr(self, mn), pn): if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter): no_decay.add('{}.{}'.format(mn, pn)) # validate that we considered every parameter param_dict = {pn: p for pn, p in self.transformer.named_parameters()}# if p.requires_grad} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] return optim_groups def forward( self, input, cond_emb, t): cont_emb = self.content_emb(input) emb = cont_emb for block_idx in range(len(self.blocks)): emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt) logits = self.to_logits(emb) # B x (Ld+Lt) x n out = rearrange(logits, 'b l c -> b c l') return out
30,407
41
131
py
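Shape-check sketch (not part of the dumped file) for the building blocks above: a FullAttention layer fed through an AdaLayerNorm, as in a Block with attn_type 'self'. All sizes below are illustrative, and the import path assumes the repo root is on PYTHONPATH.

import torch
from image_synthesis.modeling.transformers.transformer_utils import FullAttention, AdaLayerNorm

attn = FullAttention(n_embd=512, n_head=8, seq_len=256)
ln = AdaLayerNorm(n_embd=512, diffusion_step=100, emb_type="adalayernorm_abs")

x = torch.randn(2, 256, 512)        # (batch, content tokens, embed dim)
t = torch.randint(0, 100, (2,))     # one diffusion timestep per sample
y, att = attn(ln(x, t), encoder_output=None)   # encoder_output is ignored by FullAttention
print(y.shape, att.shape)           # torch.Size([2, 256, 512]) torch.Size([2, 256, 256])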
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/lr_scheduler.py
import numpy as np class LambdaWarmUpCosineScheduler: """ note: use with a base_lr of 1.0 """ def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): self.lr_warm_up_steps = warm_up_steps self.lr_start = lr_start self.lr_min = lr_min self.lr_max = lr_max self.lr_max_decay_steps = max_decay_steps self.last_lr = 0. self.verbosity_interval = verbosity_interval def schedule(self, n): if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") if n < self.lr_warm_up_steps: lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start self.last_lr = lr return lr else: t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) t = min(t, 1.0) lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( 1 + np.cos(t * np.pi)) self.last_lr = lr return lr def __call__(self, n): return self.schedule(n)
1,205
33.457143
114
py
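Usage sketch (not part of the dumped file): LambdaWarmUpCosineScheduler is written to be plugged into torch.optim.lr_scheduler.LambdaLR with a base learning rate of 1.0, as its docstring notes. The optimizer and hyperparameter values below are illustrative.

import torch
from image_synthesis.taming.lr_scheduler import LambdaWarmUpCosineScheduler

model = torch.nn.Linear(8, 8)
opt = torch.optim.Adam(model.parameters(), lr=1.0)   # base_lr = 1.0; the schedule returns the real lr

schedule = LambdaWarmUpCosineScheduler(
    warm_up_steps=1000, lr_min=1e-6, lr_max=3e-4, lr_start=1e-6, max_decay_steps=100_000)
lr_sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=schedule)

for step in range(3):
    opt.step()        # gradients omitted in this sketch
    lr_sched.step()   # linear warm-up for 1000 steps, then cosine decay towards lr_min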
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/util.py
import os, hashlib import requests from tqdm import tqdm URL_MAP = { "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1" } CKPT_MAP = { "vgg_lpips": "vgg.pth" } MD5_MAP = { "vgg_lpips": "d507d7349b931f0638a25a48a722f98a" } def download(url, local_path, chunk_size=1024): os.makedirs(os.path.split(local_path)[0], exist_ok=True) with requests.get(url, stream=True) as r: total_size = int(r.headers.get("content-length", 0)) with tqdm(total=total_size, unit="B", unit_scale=True) as pbar: with open(local_path, "wb") as f: for data in r.iter_content(chunk_size=chunk_size): if data: f.write(data) pbar.update(chunk_size) def md5_hash(path): with open(path, "rb") as f: content = f.read() return hashlib.md5(content).hexdigest() def get_ckpt_path(name, root, check=False): assert name in URL_MAP path = os.path.join(root, CKPT_MAP[name]) if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]): print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path)) download(URL_MAP[name], path) md5 = md5_hash(path) assert md5 == MD5_MAP[name], md5 return path class KeyNotFoundError(Exception): def __init__(self, cause, keys=None, visited=None): self.cause = cause self.keys = keys self.visited = visited messages = list() if keys is not None: messages.append("Key not found: {}".format(keys)) if visited is not None: messages.append("Visited: {}".format(visited)) messages.append("Cause:\n{}".format(cause)) message = "\n".join(messages) super().__init__(message) def retrieve( list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False ): """Given a nested list or dict return the desired value at key expanding callable nodes if necessary and :attr:`expand` is ``True``. The expansion is done in-place. Parameters ---------- list_or_dict : list or dict Possibly nested list or dictionary. key : str key/to/value, path like string describing all keys necessary to consider to get to the desired value. List indices can also be passed here. splitval : str String that defines the delimiter between keys of the different depth levels in `key`. default : obj Value returned if :attr:`key` is not found. expand : bool Whether to expand callable nodes on the path or not. Returns ------- The desired value or if :attr:`default` is not ``None`` and the :attr:`key` is not found returns ``default``. Raises ------ Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is ``None``. """ keys = key.split(splitval) success = True try: visited = [] parent = None last_key = None for key in keys: if callable(list_or_dict): if not expand: raise KeyNotFoundError( ValueError( "Trying to get past callable node with expand=False." 
), keys=keys, visited=visited, ) list_or_dict = list_or_dict() parent[last_key] = list_or_dict last_key = key parent = list_or_dict try: if isinstance(list_or_dict, dict): list_or_dict = list_or_dict[key] else: list_or_dict = list_or_dict[int(key)] except (KeyError, IndexError, ValueError) as e: raise KeyNotFoundError(e, keys=keys, visited=visited) visited += [key] # final expansion of retrieved value if expand and callable(list_or_dict): list_or_dict = list_or_dict() parent[last_key] = list_or_dict except KeyNotFoundError as e: if default is None: raise e else: list_or_dict = default success = False if not pass_success: return list_or_dict else: return list_or_dict, success if __name__ == "__main__": config = {"keya": "a", "keyb": "b", "keyc": {"cc1": 1, "cc2": 2, } } from omegaconf import OmegaConf config = OmegaConf.create(config) print(config) retrieve(config, "keya")
4,777
29.240506
85
py
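Usage sketch (not part of the dumped file) for retrieve: a path-like lookup into a nested config with an optional default. The config contents are made up for illustration.

from image_synthesis.taming.util import retrieve

config = {"model": {"params": {"embed_dim": 256, "ch_mult": [1, 2, 4]}}}
print(retrieve(config, "model/params/embed_dim"))           # 256
print(retrieve(config, "model/params/ch_mult/2"))           # 4  (list indices are allowed in the path)
print(retrieve(config, "model/params/missing", default=0))  # 0  (falls back instead of raising)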
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/util.py
import torch import torch.nn as nn def count_params(model): total_params = sum(p.numel() for p in model.parameters()) return total_params class ActNorm(nn.Module): def __init__(self, num_features, logdet=False, affine=True, allow_reverse_init=False): assert affine super().__init__() self.logdet = logdet self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1)) self.allow_reverse_init = allow_reverse_init self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) def initialize(self, input): with torch.no_grad(): flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1) mean = ( flatten.mean(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) std = ( flatten.std(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) self.loc.data.copy_(-mean) self.scale.data.copy_(1 / (std + 1e-6)) def forward(self, input, reverse=False): if reverse: return self.reverse(input) if len(input.shape) == 2: input = input[:,:,None,None] squeeze = True else: squeeze = False _, _, height, width = input.shape if self.training and self.initialized.item() == 0: self.initialize(input) self.initialized.fill_(1) h = self.scale * (input + self.loc) if squeeze: h = h.squeeze(-1).squeeze(-1) if self.logdet: log_abs = torch.log(torch.abs(self.scale)) logdet = height*width*torch.sum(log_abs) logdet = logdet * torch.ones(input.shape[0]).to(input) return h, logdet return h def reverse(self, output): if self.training and self.initialized.item() == 0: if not self.allow_reverse_init: raise RuntimeError( "Initializing ActNorm in reverse direction is " "disabled by default. Use allow_reverse_init=True to enable." ) else: self.initialize(output) self.initialized.fill_(1) if len(output.shape) == 2: output = output[:,:,None,None] squeeze = True else: squeeze = False h = output / self.scale - self.loc if squeeze: h = h.squeeze(-1).squeeze(-1) return h class AbstractEncoder(nn.Module): def __init__(self): super().__init__() def encode(self, *args, **kwargs): raise NotImplementedError class Labelator(AbstractEncoder): """Net2Net Interface for Class-Conditional Model""" def __init__(self, n_classes, quantize_interface=True): super().__init__() self.n_classes = n_classes self.quantize_interface = quantize_interface def encode(self, c): c = c[:,None] if self.quantize_interface: return c, None, [None, None, c.long()] return c class SOSProvider(AbstractEncoder): # for unconditional training def __init__(self, sos_token, quantize_interface=True): super().__init__() self.sos_token = sos_token self.quantize_interface = quantize_interface def encode(self, x): # get batch size from data and replicate sos_token c = torch.ones(x.shape[0], 1)*self.sos_token c = c.long().to(x.device) if self.quantize_interface: return c, None, [None, None, c] return c
3,847
28.374046
85
py
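Usage sketch (not part of the dumped file) for ActNorm: the first forward pass in training mode data-initialises loc and scale from the batch statistics. Shapes are illustrative.

import torch
from image_synthesis.taming.modules.util import ActNorm, count_params

norm = ActNorm(num_features=64)
x = torch.randn(4, 64, 16, 16)

norm.train()
y = norm(x)                  # triggers the one-off data-dependent initialisation
print(y.shape)               # torch.Size([4, 64, 16, 16])
print(count_params(norm))    # 128 (loc and scale, 64 channels each)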
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/vqvae/quantize.py
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from torch import einsum from einops import rearrange class VectorQuantizer(nn.Module): """ see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py ____________________________________________ Discretization bottleneck part of the VQ-VAE. Inputs: - n_e : number of embeddings - e_dim : dimension of embedding - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 _____________________________________________ """ # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be # used wherever VectorQuantizer has been used before and is additionally # more efficient. def __init__(self, n_e, e_dim, beta): super(VectorQuantizer, self).__init__() self.n_e = n_e self.e_dim = e_dim self.beta = beta self.embedding = nn.Embedding(self.n_e, self.e_dim) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) def forward(self, z): """ Inputs the output of the encoder network z and maps it to a discrete one-hot vector that is the index of the closest embedding vector e_j z (continuous) -> z_q (discrete) z.shape = (batch, channel, height, width) quantization pipeline: 1. get encoder input (B,C,H,W) 2. flatten input to (B*H*W,C) """ # reshape z -> (batch, height, width, channel) and flatten z = z.permute(0, 2, 3, 1).contiguous() z_flattened = z.view(-1, self.e_dim) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ torch.sum(self.embedding.weight**2, dim=1) - 2 * \ torch.matmul(z_flattened, self.embedding.weight.t()) ## could possible replace this here # #\start... # find closest encodings min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) min_encodings = torch.zeros( min_encoding_indices.shape[0], self.n_e).to(z) min_encodings.scatter_(1, min_encoding_indices, 1) # dtype min encodings: torch.float32 # min_encodings shape: torch.Size([2048, 512]) # min_encoding_indices.shape: torch.Size([2048, 1]) # get quantized latent vectors z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) #.........\end # with: # .........\start #min_encoding_indices = torch.argmin(d, dim=1) #z_q = self.embedding(min_encoding_indices) # ......\end......... (TODO) # compute loss for embedding loss = torch.mean((z_q.detach()-z)**2) + self.beta * \ torch.mean((z_q - z.detach()) ** 2) # preserve gradients z_q = z + (z_q - z).detach() # perplexity e_mean = torch.mean(min_encodings, dim=0) perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def get_codebook_entry(self, indices, shape): # shape specifying (batch, height, width, channel) # TODO: check for more easy handling with nn.Embedding min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices) min_encodings.scatter_(1, indices[:,None], 1) # get quantized latent vectors z_q = torch.matmul(min_encodings.float(), self.embedding.weight) if shape is not None: z_q = z_q.view(shape) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q class GumbelQuantize(nn.Module): """ credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!) 
Gumbel Softmax trick quantizer Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016 https://arxiv.org/abs/1611.01144 """ def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True, kl_weight=5e-4, temp_init=1.0, use_vqinterface=True, remap=None, unknown_index="random"): super().__init__() self.embedding_dim = embedding_dim self.n_embed = n_embed self.straight_through = straight_through self.temperature = temp_init self.kl_weight = kl_weight self.proj = nn.Conv2d(num_hiddens, n_embed, 1) self.embed = nn.Embedding(n_embed, embedding_dim) self.use_vqinterface = use_vqinterface self.remap = remap if self.remap is not None: self.register_buffer("used", torch.tensor(np.load(self.remap))) self.re_embed = self.used.shape[0] self.unknown_index = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": self.unknown_index = self.re_embed self.re_embed = self.re_embed+1 print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. " f"Using {self.unknown_index} for unknown indices.") else: self.re_embed = n_embed def remap_to_used(self, inds): ishape = inds.shape assert len(ishape)>1 inds = inds.reshape(ishape[0],-1) used = self.used.to(inds) match = (inds[:,:,None]==used[None,None,...]).long() new = match.argmax(-1) unknown = match.sum(2)<1 if self.unknown_index == "random": new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device) else: new[unknown] = self.unknown_index return new.reshape(ishape) def unmap_to_all(self, inds): ishape = inds.shape assert len(ishape)>1 inds = inds.reshape(ishape[0],-1) used = self.used.to(inds) if self.re_embed > self.used.shape[0]: # extra token inds[inds>=self.used.shape[0]] = 0 # simply set to zero back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds) return back.reshape(ishape) def forward(self, z, temp=None, return_logits=False): # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work hard = self.straight_through if self.training else True temp = self.temperature if temp is None else temp logits = self.proj(z) if self.remap is not None: # continue only with used logits full_zeros = torch.zeros_like(logits) logits = logits[:,self.used,...] soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard) if self.remap is not None: # go back to all entries but unused set to zero full_zeros[:,self.used,...] = soft_one_hot soft_one_hot = full_zeros z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight) # + kl divergence to the prior loss qy = F.softmax(logits, dim=1) diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean() ind = soft_one_hot.argmax(dim=1) if self.remap is not None: ind = self.remap_to_used(ind) if self.use_vqinterface: if return_logits: return z_q, diff, (None, None, ind), logits return z_q, diff, (None, None, ind) return z_q, diff, ind def get_codebook_entry(self, indices, shape): b, h, w, c = shape assert b*h*w == indices.shape[0] indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w) if self.remap is not None: indices = self.unmap_to_all(indices) one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float() z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight) return z_q class VectorQuantizer2(nn.Module): """ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix multiplications and allows for post-hoc remapping of indices. 
""" # NOTE: due to a bug the beta term was applied to the wrong term. for # backwards compatibility we use the buggy version by default, but you can # specify legacy=False to fix it. def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True): super().__init__() self.n_e = n_e self.e_dim = e_dim self.beta = beta self.legacy = legacy self.embedding = nn.Embedding(self.n_e, self.e_dim) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) self.remap = remap if self.remap is not None: self.register_buffer("used", torch.tensor(np.load(self.remap))) self.re_embed = self.used.shape[0] self.unknown_index = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": self.unknown_index = self.re_embed self.re_embed = self.re_embed+1 print(f"Remapping {self.n_e} indices to {self.re_embed} indices. " f"Using {self.unknown_index} for unknown indices.") else: self.re_embed = n_e self.sane_index_shape = sane_index_shape def remap_to_used(self, inds): ishape = inds.shape assert len(ishape)>1 inds = inds.reshape(ishape[0],-1) used = self.used.to(inds) match = (inds[:,:,None]==used[None,None,...]).long() new = match.argmax(-1) unknown = match.sum(2)<1 if self.unknown_index == "random": new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device) else: new[unknown] = self.unknown_index return new.reshape(ishape) def unmap_to_all(self, inds): ishape = inds.shape assert len(ishape)>1 inds = inds.reshape(ishape[0],-1) used = self.used.to(inds) if self.re_embed > self.used.shape[0]: # extra token inds[inds>=self.used.shape[0]] = 0 # simply set to zero back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds) return back.reshape(ishape) def forward(self, z, temp=None, rescale_logits=False, return_logits=False): assert temp is None or temp==1.0, "Only for interface compatible with Gumbel" assert rescale_logits==False, "Only for interface compatible with Gumbel" assert return_logits==False, "Only for interface compatible with Gumbel" # reshape z -> (batch, height, width, channel) and flatten z = rearrange(z, 'b c h w -> b h w c').contiguous() z_flattened = z.view(-1, self.e_dim) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ torch.sum(self.embedding.weight**2, dim=1) - 2 * \ torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n')) min_encoding_indices = torch.argmin(d, dim=1) z_q = self.embedding(min_encoding_indices).view(z.shape) perplexity = None min_encodings = None # compute loss for embedding if not self.legacy: loss = self.beta * torch.mean((z_q.detach()-z)**2) + \ torch.mean((z_q - z.detach()) ** 2) else: loss = torch.mean((z_q.detach()-z)**2) + self.beta * \ torch.mean((z_q - z.detach()) ** 2) # preserve gradients z_q = z + (z_q - z).detach() # reshape back to match original input shape z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous() if self.remap is not None: min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis min_encoding_indices = self.remap_to_used(min_encoding_indices) min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten if self.sane_index_shape: min_encoding_indices = min_encoding_indices.reshape( z_q.shape[0], z_q.shape[2], z_q.shape[3]) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def get_codebook_entry(self, indices, shape): # shape specifying (batch, height, width, channel) if self.remap is not 
None: indices = indices.reshape(shape[0],-1) # add batch axis indices = self.unmap_to_all(indices) indices = indices.reshape(-1) # flatten again # get quantized latent vectors z_q = self.embedding(indices) if shape is not None: z_q = z_q.view(shape) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q
13,259
39.181818
110
py
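Usage sketch (not part of the dumped file) for VectorQuantizer2, the drop-in replacement recommended in the comments above; legacy=False applies the corrected beta weighting. The codebook size and latent shape are illustrative.

import torch
from image_synthesis.taming.modules.vqvae.quantize import VectorQuantizer2

quantizer = VectorQuantizer2(n_e=1024, e_dim=256, beta=0.25, legacy=False)
z = torch.randn(2, 256, 16, 16)      # (B, e_dim, H, W) encoder output

z_q, loss, (perplexity, min_encodings, indices) = quantizer(z)
print(z_q.shape)       # torch.Size([2, 256, 16, 16]) - straight-through quantised latents
print(indices.shape)   # torch.Size([512]) - one codebook index per spatial position (B*H*W)
# perplexity and min_encodings are returned as None by VectorQuantizer2.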
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/discriminator/model.py
import functools import torch.nn as nn from image_synthesis.taming.modules.util import ActNorm def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) class NLayerDiscriminator(nn.Module): """Defines a PatchGAN discriminator as in Pix2Pix --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py """ def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False): """Construct a PatchGAN discriminator Parameters: input_nc (int) -- the number of channels in input images ndf (int) -- the number of filters in the last conv layer n_layers (int) -- the number of conv layers in the discriminator norm_layer -- normalization layer """ super(NLayerDiscriminator, self).__init__() if not use_actnorm: norm_layer = nn.BatchNorm2d else: norm_layer = ActNorm if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters use_bias = norm_layer.func != nn.BatchNorm2d else: use_bias = norm_layer != nn.BatchNorm2d kw = 4 padw = 1 sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] nf_mult = 1 nf_mult_prev = 1 for n in range(1, n_layers): # gradually increase the number of filters nf_mult_prev = nf_mult nf_mult = min(2 ** n, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] nf_mult_prev = nf_mult nf_mult = min(2 ** n_layers, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] sequence += [ nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map self.main = nn.Sequential(*sequence) def forward(self, input): """Standard forward.""" return self.main(input)
2,566
36.75
116
py
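Usage sketch (not part of the dumped file): the PatchGAN discriminator above maps an image to a grid of per-patch real/fake logits. The input resolution is illustrative.

import torch
from image_synthesis.taming.modules.discriminator.model import NLayerDiscriminator, weights_init

disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3).apply(weights_init)
img = torch.randn(2, 3, 256, 256)
logits = disc(img)
print(logits.shape)   # torch.Size([2, 1, 30, 30]) - one logit per overlapping 70x70 patch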
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/misc/coord.py
import torch class CoordStage(object): def __init__(self, n_embed, down_factor): self.n_embed = n_embed self.down_factor = down_factor def eval(self): return self def encode(self, c): """fake vqmodel interface""" assert 0.0 <= c.min() and c.max() <= 1.0 b,ch,h,w = c.shape assert ch == 1 c = torch.nn.functional.interpolate(c, scale_factor=1/self.down_factor, mode="area") c = c.clamp(0.0, 1.0) c = self.n_embed*c c_quant = c.round() c_ind = c_quant.to(dtype=torch.long) info = None, None, c_ind return c_quant, None, info def decode(self, c): c = c/self.n_embed c = torch.nn.functional.interpolate(c, scale_factor=self.down_factor, mode="nearest") return c
904
27.28125
79
py
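Usage sketch (not part of the dumped file) for CoordStage, the fake-VQ interface above that quantises a [0, 1] coordinate map by downsampling and rounding. Sizes are illustrative.

import torch
from image_synthesis.taming.modules.misc.coord import CoordStage

stage = CoordStage(n_embed=1024, down_factor=16)
coords = torch.rand(2, 1, 256, 256)              # values must lie in [0, 1]

c_quant, _, (_, _, c_ind) = stage.encode(coords) # quantised map and integer indices, both 16x16
print(c_quant.shape, c_ind.shape)                # torch.Size([2, 1, 16, 16]) twice
rec = stage.decode(c_quant)                      # nearest-neighbour upsample back to 256x256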
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/diffusionmodules/model.py
# pytorch_diffusion + derived encoder decoder import math import torch import torch.nn as nn import numpy as np def get_timestep_embedding(timesteps, embedding_dim): """ This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ assert len(timesteps.shape) == 1 half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) emb = emb.to(device=timesteps.device) emb = timesteps.float()[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0,1,0,0)) return emb def nonlinearity(x): # swish return x*torch.sigmoid(x) def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") if self.with_conv: x = self.conv(x) return x class Downsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, x): if self.with_conv: pad = (0,1,0,1) x = torch.nn.functional.pad(x, pad, mode="constant", value=0) x = self.conv(x) else: x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) return x class ResnetBlock(nn.Module): def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x, temb): h = x h = self.norm1(h) h = nonlinearity(h) h = self.conv1(h) if temb is not None: h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] h = self.norm2(h) h = nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) else: x = self.nin_shortcut(x) return x+h class AttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = Normalize(in_channels) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, 
padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): h_ = x h_ = self.norm(h_) q = self.q(h_) k = self.k(h_) v = self.v(h_) # compute attention b,c,h,w = q.shape q = q.reshape(b,c,h*w) q = q.permute(0,2,1) # b,hw,c k = k.reshape(b,c,h*w) # b,c,hw w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] w_ = w_ * (int(c)**(-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values v = v.reshape(b,c,h*w) w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] h_ = h_.reshape(b,c,h,w) h_ = self.proj_out(h_) return x+h_ class Model(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, use_timestep=True): super().__init__() self.ch = ch self.temb_ch = self.ch*4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.use_timestep = use_timestep if self.use_timestep: # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList([ torch.nn.Linear(self.ch, self.temb_ch), torch.nn.Linear(self.temb_ch, self.temb_ch), ]) # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = AttnBlock(block_in) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] skip_in = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): if i_block == self.num_res_blocks: skip_in = ch*in_ch_mult[i_level] block.append(ResnetBlock(in_channels=block_in+skip_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, x, t=None): #assert x.shape[2] == x.shape[3] == self.resolution if self.use_timestep: # timestep embedding assert t is not None temb = get_timestep_embedding(t, self.ch) temb = 
self.temb.dense[0](temb) temb = nonlinearity(temb) temb = self.temb.dense[1](temb) else: temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block]( torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class Encoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, double_z=True, **ignore_kwargs): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = AttnBlock(block_in) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, 2*z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution) # timestep embedding temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, **ignorekwargs): super().__init__() 
self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end # compute in_ch_mult, block_in and curr_res at lowest res in_ch_mult = (1,)+tuple(ch_mult) block_in = ch*ch_mult[self.num_resolutions-1] curr_res = resolution // 2**(self.num_resolutions-1) self.z_shape = (1,z_channels,curr_res,curr_res) print("Working with z of shape {} = {} dimensions.".format( self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = AttnBlock(block_in) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z): #assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class VUNet(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, c_channels, resolution, z_channels, use_timestep=False, **ignore_kwargs): super().__init__() self.ch = ch self.temb_ch = self.ch*4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.use_timestep = use_timestep if self.use_timestep: # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList([ torch.nn.Linear(self.ch, self.temb_ch), torch.nn.Linear(self.temb_ch, self.temb_ch), ]) # downsampling self.conv_in = torch.nn.Conv2d(c_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) down = 
nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) self.z_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=1, stride=1, padding=0) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=2*block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = AttnBlock(block_in) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] skip_in = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): if i_block == self.num_res_blocks: skip_in = ch*in_ch_mult[i_level] block.append(ResnetBlock(in_channels=block_in+skip_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(AttnBlock(block_in)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, x, z): #assert x.shape[2] == x.shape[3] == self.resolution if self.use_timestep: # timestep embedding assert t is not None temb = get_timestep_embedding(t, self.ch) temb = self.temb.dense[0](temb) temb = nonlinearity(temb) temb = self.temb.dense[1](temb) else: temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] z = self.z_in(z) h = torch.cat((h,z),dim=1) h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block]( torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class SimpleDecoder(nn.Module): def __init__(self, in_channels, out_channels, *args, **kwargs): super().__init__() self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), ResnetBlock(in_channels=in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=2 * in_channels, out_channels=4 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=4 * in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), nn.Conv2d(2*in_channels, in_channels, 1), Upsample(in_channels, with_conv=True)]) # end self.norm_out = Normalize(in_channels) self.conv_out = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): for i, layer in enumerate(self.model): if i in [1,2,3]: x = layer(x, None) else: x = layer(x) h = self.norm_out(x) h = nonlinearity(h) x = self.conv_out(h) return x class UpsampleDecoder(nn.Module): def 
__init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, ch_mult=(2,2), dropout=0.0): super().__init__() # upsampling self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks block_in = in_channels curr_res = resolution // 2 ** (self.num_resolutions - 1) self.res_blocks = nn.ModuleList() self.upsample_blocks = nn.ModuleList() for i_level in range(self.num_resolutions): res_block = [] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): res_block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out self.res_blocks.append(nn.ModuleList(res_block)) if i_level != self.num_resolutions - 1: self.upsample_blocks.append(Upsample(block_in, True)) curr_res = curr_res * 2 # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): # upsampling h = x for k, i_level in enumerate(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.res_blocks[i_level][i_block](h, None) if i_level != self.num_resolutions - 1: h = self.upsample_blocks[k](h) h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h
30,221
37.895753
121
py
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/transformer/mingpt.py
""" taken from: https://github.com/karpathy/minGPT/ GPT model: - the initial stem consists of a combination of token encoding and a positional encoding - the meat of it is a uniform sequence of Transformer blocks - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block - all blocks feed into a central residual pathway similar to resnets - the final decoder is a linear projection into a vanilla Softmax classifier """ import math import logging import torch import torch.nn as nn from torch.nn import functional as F logger = logging.getLogger(__name__) class GPTConfig: """ base GPT config, params common to all GPT versions """ embd_pdrop = 0.1 resid_pdrop = 0.1 attn_pdrop = 0.1 def __init__(self, vocab_size, block_size, **kwargs): self.vocab_size = vocab_size self.block_size = block_size for k,v in kwargs.items(): setattr(self, k, v) class GPT1Config(GPTConfig): """ GPT-1 like network roughly 125M params """ n_layer = 12 n_head = 12 n_embd = 768 class CausalSelfAttention(nn.Module): """ A vanilla multi-head masked self-attention layer with a projection at the end. It is possible to use torch.nn.MultiheadAttention here but I am including an explicit implementation here to show that there is nothing too scary here. """ def __init__(self, config): super().__init__() assert config.n_embd % config.n_head == 0 # key, query, value projections for all heads self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) # regularization self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) # output projection self.proj = nn.Linear(config.n_embd, config.n_embd) # causal mask to ensure that attention is only applied to the left in the input sequence mask = torch.tril(torch.ones(config.block_size, config.block_size)) if hasattr(config, "n_unmasked"): mask[:config.n_unmasked, :config.n_unmasked] = 1 self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size)) self.n_head = config.n_head def forward(self, x, layer_past=None): B, T, C = x.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) present = torch.stack((k, v)) if layer_past is not None: past_key, past_value = layer_past k = torch.cat((past_key, k), dim=-2) v = torch.cat((past_value, v), dim=-2) # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) if layer_past is None: att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf')) att = F.softmax(att, dim=-1) att = self.attn_drop(att) y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side # output projection y = self.resid_drop(self.proj(y)) return y, present # TODO: check that this does not break anything class Block(nn.Module): """ an unassuming Transformer block """ def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) self.mlp = nn.Sequential( 
nn.Linear(config.n_embd, 4 * config.n_embd), nn.GELU(), # nice nn.Linear(4 * config.n_embd, config.n_embd), nn.Dropout(config.resid_pdrop), ) def forward(self, x, layer_past=None, return_present=False): # TODO: check that training still works if return_present: assert not self.training # layer past: tuple of length two with B, nh, T, hs attn, present = self.attn(self.ln1(x), layer_past=layer_past) x = x + attn x = x + self.mlp(self.ln2(x)) if layer_past is not None or return_present: return x, present return x class GPT(nn.Module): """ the full GPT language model, with a context size of block_size """ def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256, embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0): super().__init__() config = GPTConfig(vocab_size=vocab_size, block_size=block_size, embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop, n_layer=n_layer, n_head=n_head, n_embd=n_embd, n_unmasked=n_unmasked) # input embedding stem self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) # transformer self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)]) # decoder head self.ln_f = nn.LayerNorm(config.n_embd) self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.block_size = config.block_size self.apply(self._init_weights) self.config = config logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) def get_block_size(self): return self.block_size def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def forward(self, idx, embeddings=None, targets=None): # forward the GPT model token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector if embeddings is not None: # prepend explicit embeddings token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) t = token_embeddings.shape[1] assert t <= self.block_size, "Cannot forward, model block size is exhausted." 
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector x = self.drop(token_embeddings + position_embeddings) x = self.blocks(x) x = self.ln_f(x) logits = self.head(x) # if we are given some desired targets also calculate the loss loss = None if targets is not None: loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) return logits, loss def forward_with_past(self, idx, embeddings=None, targets=None, past=None, past_length=None): # inference only assert not self.training token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector if embeddings is not None: # prepend explicit embeddings token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) if past is not None: assert past_length is not None past = torch.cat(past, dim=-2) # n_layer, 2, b, nh, len_past, dim_head past_shape = list(past.shape) expected_shape = [self.config.n_layer, 2, idx.shape[0], self.config.n_head, past_length, self.config.n_embd//self.config.n_head] assert past_shape == expected_shape, f"{past_shape} =/= {expected_shape}" position_embeddings = self.pos_emb[:, past_length, :] # each position maps to a (learnable) vector else: position_embeddings = self.pos_emb[:, :token_embeddings.shape[1], :] x = self.drop(token_embeddings + position_embeddings) presents = [] # accumulate over layers for i, block in enumerate(self.blocks): x, present = block(x, layer_past=past[i, ...] if past is not None else None, return_present=True) presents.append(present) x = self.ln_f(x) logits = self.head(x) # if we are given some desired targets also calculate the loss loss = None if targets is not None: loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) return logits, loss, torch.stack(presents) # _, _, n_layer, 2, b, nh, 1, dim_head class DummyGPT(nn.Module): # for debugging def __init__(self, add_value=1): super().__init__() self.add_value = add_value def forward(self, idx): return idx + self.add_value, None class CodeGPT(nn.Module): """Takes in semi-embeddings""" def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256, embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0): super().__init__() config = GPTConfig(vocab_size=vocab_size, block_size=block_size, embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop, n_layer=n_layer, n_head=n_head, n_embd=n_embd, n_unmasked=n_unmasked) # input embedding stem self.tok_emb = nn.Linear(in_channels, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) # transformer self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)]) # decoder head self.ln_f = nn.LayerNorm(config.n_embd) self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.block_size = config.block_size self.apply(self._init_weights) self.config = config logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) def get_block_size(self): return self.block_size def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def forward(self, idx, embeddings=None, targets=None): # forward the GPT model token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector if embeddings is not 
None: # prepend explicit embeddings token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) t = token_embeddings.shape[1] assert t <= self.block_size, "Cannot forward, model block size is exhausted." position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector x = self.drop(token_embeddings + position_embeddings) x = self.blocks(x) x = self.taming_cinln_f(x) logits = self.head(x) # if we are given some desired targets also calculate the loss loss = None if targets is not None: loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) return logits, loss def top_k_logits(logits, k): v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[:, [-1]]] = -float('Inf') return out @torch.no_grad() def sample(model, x, steps, temperature=1.0, sample=False, top_k=None): """ take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in the sequence, feeding the predictions back into the model each time. Clearly the sampling has quadratic complexity unlike an RNN that is only linear, and has a finite context window of block_size, unlike an RNN that has an infinite context window. """ block_size = model.get_block_size() model.eval() for k in range(steps): x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed logits, _ = model(x_cond) # pluck the logits at the final step and scale by temperature logits = logits[:, -1, :] / temperature # optionally crop probabilities to only the top k options if top_k is not None: logits = top_k_logits(logits, top_k) # apply softmax to convert to probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution or take the most likely if sample: ix = torch.multinomial(probs, num_samples=1) else: _, ix = torch.topk(probs, k=1, dim=-1) # append to the sequence and continue x = torch.cat((x, ix), dim=1) return x class KMeans(nn.Module): def __init__(self, ncluster=512, nc=3, niter=10): super().__init__() self.ncluster = ncluster self.nc = nc self.niter = niter self.shape = (3,32,32) self.register_buffer("C", torch.zeros(self.ncluster,nc)) self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) def is_initialized(self): return self.initialized.item() == 1 @torch.no_grad() def initialize(self, x): N, D = x.shape assert D == self.nc, D c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random for i in range(self.niter): # assign all pixels to the closest codebook element a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1) # move each codebook element to be the mean of the pixels that assigned to it c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)]) # re-assign any poorly positioned codebook elements nanix = torch.any(torch.isnan(c), dim=1) ndead = nanix.sum().item() print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead)) c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters self.C.copy_(c) self.initialized.fill_(1) def forward(self, x, reverse=False, shape=None): if not reverse: # flatten bs,c,h,w = x.shape assert c == self.nc x = x.reshape(bs,c,h*w,1) C = self.C.permute(1,0) C = C.reshape(1,c,1,self.ncluster) a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices return a else: # flatten bs, HW = x.shape """ c = self.C.reshape( 1, self.nc, 1, self.ncluster) c = c[bs*[0],:,:,:] c = c[:,:,HW*[0],:] x = x.reshape(bs, 1, HW, 1) x = x[:,3*[0],:,:] x = torch.gather(c, dim=3, index=x) """ x = self.C[x] x = x.permute(0,2,1) shape = shape if shape is not None else 
self.shape x = x.reshape(bs, *shape) return x
15,743
40.10705
140
py
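A quick way to see what the top_k_logits filter in the mingpt.py record above does is to run it on a toy logit tensor. Minimal sketch, assuming the file is importable under the module path implied by its location in the repo (adjust the import to your checkout); illustrative only, not code from the repository:

# Sketch: top-k filtering as implemented by top_k_logits above (assumed import path).
import torch
from image_synthesis.taming.modules.transformer.mingpt import top_k_logits

logits = torch.tensor([[2.0, 0.5, -1.0, 3.0, 0.0]])  # batch of 1, vocabulary of 5
filtered = top_k_logits(logits, 2)                   # keep the 2 largest logits, set the rest to -inf
print(filtered)                                      # tensor([[2., -inf, -inf, 3., -inf]])
probs = torch.softmax(filtered, dim=-1)              # only indices 0 and 3 keep probability mass
print(probs)

The sample() loop in the same file applies exactly this filtering to the last-position logits before the softmax/multinomial step when a top_k value is passed.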
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/transformer/permuter.py
import torch import torch.nn as nn import numpy as np class AbstractPermuter(nn.Module): def __init__(self, *args, **kwargs): super().__init__() def forward(self, x, reverse=False): raise NotImplementedError class Identity(AbstractPermuter): def __init__(self): super().__init__() def forward(self, x, reverse=False): return x class Subsample(AbstractPermuter): def __init__(self, H, W): super().__init__() C = 1 indices = np.arange(H*W).reshape(C,H,W) while min(H, W) > 1: indices = indices.reshape(C,H//2,2,W//2,2) indices = indices.transpose(0,2,4,1,3) indices = indices.reshape(C*4,H//2, W//2) H = H//2 W = W//2 C = C*4 assert H == W == 1 idx = torch.tensor(indices.ravel()) self.register_buffer('forward_shuffle_idx', nn.Parameter(idx, requires_grad=False)) self.register_buffer('backward_shuffle_idx', nn.Parameter(torch.argsort(idx), requires_grad=False)) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] def mortonify(i, j): """(i,j) index to linear morton code""" i = np.uint64(i) j = np.uint64(j) z = np.uint(0) for pos in range(32): z = (z | ((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) | ((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1)) ) return z class ZCurve(AbstractPermuter): def __init__(self, H, W): super().__init__() reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)] idx = np.argsort(reverseidx) idx = torch.tensor(idx) reverseidx = torch.tensor(reverseidx) self.register_buffer('forward_shuffle_idx', idx) self.register_buffer('backward_shuffle_idx', reverseidx) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] class SpiralOut(AbstractPermuter): def __init__(self, H, W): super().__init__() assert H == W size = W indices = np.arange(size*size).reshape(size,size) i0 = size//2 j0 = size//2-1 i = i0 j = j0 idx = [indices[i0, j0]] step_mult = 0 for c in range(1, size//2+1): step_mult += 1 # steps left for k in range(step_mult): i = i - 1 j = j idx.append(indices[i, j]) # step down for k in range(step_mult): i = i j = j + 1 idx.append(indices[i, j]) step_mult += 1 if c < size//2: # step right for k in range(step_mult): i = i + 1 j = j idx.append(indices[i, j]) # step up for k in range(step_mult): i = i j = j - 1 idx.append(indices[i, j]) else: # end reached for k in range(step_mult-1): i = i + 1 idx.append(indices[i, j]) assert len(idx) == size*size idx = torch.tensor(idx) self.register_buffer('forward_shuffle_idx', idx) self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] class SpiralIn(AbstractPermuter): def __init__(self, H, W): super().__init__() assert H == W size = W indices = np.arange(size*size).reshape(size,size) i0 = size//2 j0 = size//2-1 i = i0 j = j0 idx = [indices[i0, j0]] step_mult = 0 for c in range(1, size//2+1): step_mult += 1 # steps left for k in range(step_mult): i = i - 1 j = j idx.append(indices[i, j]) # step down for k in range(step_mult): i = i j = j + 1 idx.append(indices[i, j]) step_mult += 1 if c < size//2: # step right for k in range(step_mult): i = i + 1 j = j idx.append(indices[i, j]) # step up for k in range(step_mult): i = i j = j - 1 idx.append(indices[i, j]) else: # end reached for k in range(step_mult-1): i = i + 1 idx.append(indices[i, j]) assert len(idx) == size*size idx = idx[::-1] idx = 
torch.tensor(idx) self.register_buffer('forward_shuffle_idx', idx) self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] class Random(nn.Module): def __init__(self, H, W): super().__init__() indices = np.random.RandomState(1).permutation(H*W) idx = torch.tensor(indices.ravel()) self.register_buffer('forward_shuffle_idx', idx) self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] class AlternateParsing(AbstractPermuter): def __init__(self, H, W): super().__init__() indices = np.arange(W*H).reshape(H,W) for i in range(1, H, 2): indices[i, :] = indices[i, ::-1] idx = indices.flatten() assert len(idx) == H*W idx = torch.tensor(idx) self.register_buffer('forward_shuffle_idx', idx) self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) def forward(self, x, reverse=False): if not reverse: return x[:, self.forward_shuffle_idx] else: return x[:, self.backward_shuffle_idx] if __name__ == "__main__": p0 = AlternateParsing(16, 16) print(p0.forward_shuffle_idx) print(p0.backward_shuffle_idx) x = torch.randint(0, 768, size=(11, 256)) y = p0(x) xre = p0(y, reverse=True) assert torch.equal(x, xre) p1 = SpiralOut(2, 2) print(p1.forward_shuffle_idx) print(p1.backward_shuffle_idx)
7,093
27.48996
83
py
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/losses/lpips.py
"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models""" import torch import torch.nn as nn from torchvision import models from collections import namedtuple from image_synthesis.taming.util import get_ckpt_path class LPIPS(nn.Module): # Learned perceptual metric def __init__(self, use_dropout=True): super().__init__() self.scaling_layer = ScalingLayer() self.chns = [64, 128, 256, 512, 512] # vg16 features self.net = vgg16(pretrained=True, requires_grad=False) self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) self.load_from_pretrained() for param in self.parameters(): param.requires_grad = False def load_from_pretrained(self, name="vgg_lpips"): ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips") self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) print("loaded pretrained LPIPS loss from {}".format(ckpt)) @classmethod def from_pretrained(cls, name="vgg_lpips"): model = cls() ckpt = get_ckpt_path(name) model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) return model def forward(self, input, target): in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target)) outs0, outs1 = self.net(in0_input), self.net(in1_input) feats0, feats1, diffs = {}, {}, {} lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] for kk in range(len(self.chns)): feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))] val = res[0] for l in range(1, len(self.chns)): val += res[l] return val class ScalingLayer(nn.Module): def __init__(self): super(ScalingLayer, self).__init__() self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) def forward(self, inp): return (inp - self.shift) / self.scale class NetLinLayer(nn.Module): """ A single linear layer which does a 1x1 conv """ def __init__(self, chn_in, chn_out=1, use_dropout=False): super(NetLinLayer, self).__init__() layers = [nn.Dropout(), ] if (use_dropout) else [] layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] self.model = nn.Sequential(*layers) class vgg16(torch.nn.Module): def __init__(self, requires_grad=False, pretrained=True): super(vgg16, self).__init__() vgg_pretrained_features = models.vgg16(pretrained=pretrained).features self.slice1 = torch.nn.Sequential() self.slice2 = torch.nn.Sequential() self.slice3 = torch.nn.Sequential() self.slice4 = torch.nn.Sequential() self.slice5 = torch.nn.Sequential() self.N_slices = 5 for x in range(4): self.slice1.add_module(str(x), vgg_pretrained_features[x]) for x in range(4, 9): self.slice2.add_module(str(x), vgg_pretrained_features[x]) for x in range(9, 16): self.slice3.add_module(str(x), vgg_pretrained_features[x]) for x in range(16, 23): self.slice4.add_module(str(x), vgg_pretrained_features[x]) for x in range(23, 30): self.slice5.add_module(str(x), vgg_pretrained_features[x]) if not requires_grad: for param in self.parameters(): param.requires_grad = False def forward(self, X): h = self.slice1(X) h_relu1_2 = h 
h = self.slice2(h) h_relu2_2 = h h = self.slice3(h) h_relu3_3 = h h = self.slice4(h) h_relu4_3 = h h = self.slice5(h) h_relu5_3 = h vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) return out def normalize_tensor(x,eps=1e-10): norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True)) return x/(norm_factor+eps) def spatial_average(x, keepdim=True): return x.mean([2,3],keepdim=keepdim)
4,778
38.172131
104
py
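Typical use of the LPIPS module from the record above is as a frozen perceptual distance between two image batches scaled to roughly [-1, 1]. Minimal sketch, assuming the pretrained vgg_lpips checkpoint is reachable through get_ckpt_path and torchvision can load VGG16 weights; illustrative only, with an assumed import path:

# Sketch: perceptual distance with the frozen LPIPS metric defined above.
import torch
from image_synthesis.taming.modules.losses.lpips import LPIPS

perceptual = LPIPS().eval()                    # loads the vgg_lpips linear heads, freezes all parameters
x = torch.rand(2, 3, 256, 256) * 2.0 - 1.0     # two random "images" scaled to roughly [-1, 1]
y = torch.rand(2, 3, 256, 256) * 2.0 - 1.0
with torch.no_grad():
    dist = perceptual(x, y)                    # shape (2, 1, 1, 1): one distance per image pair
print(dist.flatten())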
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/losses/segmentation.py
import torch.nn as nn
import torch.nn.functional as F


class BCELoss(nn.Module):
    def forward(self, prediction, target):
        loss = F.binary_cross_entropy_with_logits(prediction,target)
        return loss, {}


class BCELossWithQuant(nn.Module):
    def __init__(self, codebook_weight=1.):
        super().__init__()
        self.codebook_weight = codebook_weight

    def forward(self, qloss, target, prediction, split):
        bce_loss = F.binary_cross_entropy_with_logits(prediction,target)
        loss = bce_loss + self.codebook_weight*qloss
        return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(),
                      "{}/bce_loss".format(split): bce_loss.detach().mean(),
                      "{}/quant_loss".format(split): qloss.detach().mean()
                      }
816
34.521739
82
py
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/modules/losses/vqperceptual.py
import torch import torch.nn as nn import torch.nn.functional as F from image_synthesis.taming.modules.losses.lpips import LPIPS from image_synthesis.taming.modules.discriminator.model import NLayerDiscriminator, weights_init class DummyLoss(nn.Module): def __init__(self): super().__init__() def adopt_weight(weight, global_step, threshold=0, value=0.): if global_step < threshold: weight = value return weight def hinge_d_loss(logits_real, logits_fake): loss_real = torch.mean(F.relu(1. - logits_real)) loss_fake = torch.mean(F.relu(1. + logits_fake)) d_loss = 0.5 * (loss_real + loss_fake) return d_loss def vanilla_d_loss(logits_real, logits_fake): d_loss = 0.5 * ( torch.mean(torch.nn.functional.softplus(-logits_real)) + torch.mean(torch.nn.functional.softplus(logits_fake))) return d_loss class VQLPIPSWithDiscriminator(nn.Module): def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_ndf=64, disc_loss="hinge"): super().__init__() assert disc_loss in ["hinge", "vanilla"] self.codebook_weight = codebook_weight self.pixel_weight = pixelloss_weight self.perceptual_loss = LPIPS().eval() self.perceptual_weight = perceptual_weight self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm, ndf=disc_ndf ).apply(weights_init) self.discriminator_iter_start = disc_start if disc_loss == "hinge": self.disc_loss = hinge_d_loss elif disc_loss == "vanilla": self.disc_loss = vanilla_d_loss else: raise ValueError(f"Unknown GAN loss '{disc_loss}'.") print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") self.disc_factor = disc_factor self.discriminator_weight = disc_weight self.disc_conditional = disc_conditional def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): if last_layer is not None: nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] else: nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() d_weight = d_weight * self.discriminator_weight return d_weight def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None, cond=None, split="train"): rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) if self.perceptual_weight > 0: p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) rec_loss = rec_loss + self.perceptual_weight * p_loss else: p_loss = torch.tensor([0.0]) nll_loss = rec_loss #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] nll_loss = torch.mean(nll_loss) # now the GAN part if optimizer_idx == 0: # generator update if cond is None: assert not self.disc_conditional logits_fake = self.discriminator(reconstructions.contiguous()) else: assert self.disc_conditional logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) g_loss = -torch.mean(logits_fake) try: d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) except RuntimeError: assert not self.training d_weight = torch.tensor(0.0) disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) 
loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/quant_loss".format(split): codebook_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), "{}/rec_loss".format(split): rec_loss.detach().mean(), "{}/p_loss".format(split): p_loss.detach().mean(), "{}/d_weight".format(split): d_weight.detach(), "{}/disc_factor".format(split): torch.tensor(disc_factor), "{}/g_loss".format(split): g_loss.detach().mean(), } return loss, log if optimizer_idx == 1: # second pass for discriminator update if cond is None: logits_real = self.discriminator(inputs.contiguous().detach()) logits_fake = self.discriminator(reconstructions.contiguous().detach()) else: logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), "{}/logits_real".format(split): logits_real.detach().mean(), "{}/logits_fake".format(split): logits_fake.detach().mean() } return d_loss, log
6,211
44.343066
113
py
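The adopt_weight helper in the record above is what keeps the GAN term at zero until global_step reaches disc_start, after which the discriminator factor switches to its full value. Small sketch of that schedule, illustrative only, with an assumed import path:

# Sketch: how adopt_weight gates the discriminator factor in VQLPIPSWithDiscriminator.
from image_synthesis.taming.modules.losses.vqperceptual import adopt_weight

disc_start = 10000
for step in (0, 9999, 10000, 20000):
    disc_factor = adopt_weight(1.0, step, threshold=disc_start)  # 0.0 before disc_start, 1.0 from then on
    print(step, disc_factor)
# prints: 0 0.0 / 9999 0.0 / 10000 1.0 / 20000 1.0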
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/models/vqgan.py
import torch import torch.nn.functional as F import pytorch_lightning as pl from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.taming.modules.diffusionmodules.model import Encoder, Decoder from image_synthesis.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from image_synthesis.taming.modules.vqvae.quantize import GumbelQuantize class VQModel(pl.LightningModule): def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape) self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) self.image_key = image_key if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] self.load_state_dict(sd, strict=False) print(f"Restored from {path}") def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) quant, emb_loss, info = self.quantize(h) return quant, emb_loss, info def decode(self, quant): quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec def decode_code(self, code_b): quant_b = self.quantize.embed_code(code_b) dec = self.decode(quant_b) return dec def forward(self, input): quant, diff, _ = self.encode(input) dec = self.decode(quant) return dec, diff def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format) return x.float() def training_step(self, batch, batch_idx, optimizer_idx): x = self.get_input(batch, self.image_key) xrec, qloss = self(x) if optimizer_idx == 0: # autoencode aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("train/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) return aeloss if optimizer_idx == 1: # discriminator discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) return discloss def validation_step(self, batch, batch_idx): x = self.get_input(batch, self.image_key) xrec, qloss = self(x) aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split="val") discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split="val") rec_loss = log_dict_ae["val/rec_loss"] 
self.log("val/rec_loss", rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True) self.log("val/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quantize.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight def log_images(self, batch, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) xrec, _ = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["inputs"] = x log["reconstructions"] = xrec return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. return x class GumbelVQ(VQModel): def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, temperature_scheduler_config, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, kl_weight=1e-8, remap=None, ): z_channels = ddconfig["z_channels"] super().__init__(ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=ignore_keys, image_key=image_key, colorize_nlabels=colorize_nlabels, monitor=monitor, ) self.loss.n_classes = n_embed self.vocab_size = n_embed self.quantize = GumbelQuantize(z_channels, embed_dim, n_embed=n_embed, kl_weight=kl_weight, temp_init=1.0, remap=remap) self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config) # annealing of temp if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def temperature_scheduling(self): self.quantize.temperature = self.temperature_scheduler(self.global_step) def encode_to_prequant(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode_code(self, code_b): raise NotImplementedError def training_step(self, batch, batch_idx, optimizer_idx): self.temperature_scheduling() x = self.get_input(batch, self.image_key) xrec, qloss = self(x) if optimizer_idx == 0: # autoencode aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) self.log("temperature", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True) return aeloss if optimizer_idx == 1: # discriminator discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) return discloss def validation_step(self, batch, batch_idx): x = self.get_input(batch, self.image_key) xrec, qloss = self(x, return_pred_indices=True) aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split="val") discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), 
split="val") rec_loss = log_dict_ae["val/rec_loss"] self.log("val/rec_loss", rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) self.log("val/aeloss", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def log_images(self, batch, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) # encode h = self.encoder(x) h = self.quant_conv(h) quant, _, _ = self.quantize(h) # decode x_rec = self.decode(quant) log["inputs"] = x log["reconstructions"] = x_rec return log
10,554
39.28626
120
py
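The VQModel in the record above is an encode, quantize, decode pipeline; the sketch below runs an untrained round trip with a deliberately tiny, made-up ddconfig and the DummyLoss from the vqperceptual record standing in for the GAN loss, so no checkpoints are needed. Illustrative only; none of these hyperparameters are configs shipped with the repo, and the import paths are assumed from the file locations:

# Sketch: untrained VQModel round trip with a made-up minimal config (not a repo config).
import torch
from image_synthesis.taming.models.vqgan import VQModel

ddconfig = dict(double_z=False, z_channels=64, resolution=32, in_channels=3, out_ch=3,
                ch=32, ch_mult=[1, 2], num_res_blocks=1, attn_resolutions=[], dropout=0.0)
lossconfig = {"target": "image_synthesis.taming.modules.losses.vqperceptual.DummyLoss", "params": {}}

model = VQModel(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=128, embed_dim=64).eval()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    quant, emb_loss, info = model.encode(x)   # quant: (1, 64, 16, 16) quantized latents
    xrec = model.decode(quant)                # xrec:  (1, 3, 32, 32) reconstruction
print(quant.shape, xrec.shape)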
VQ-Diffusion
VQ-Diffusion-main/image_synthesis/taming/models/cond_transformer.py
import os, math import torch import torch.nn.functional as F import pytorch_lightning as pl from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.taming.modules.util import SOSProvider def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self class Net2NetTransformer(pl.LightningModule): def __init__(self, transformer_config, first_stage_config, cond_stage_config, permuter_config=None, ckpt_path=None, ignore_keys=[], first_stage_key="image", cond_stage_key="depth", downsample_cond_size=-1, pkeep=1.0, sos_token=0, unconditional=False, ): super().__init__() self.be_unconditional = unconditional self.sos_token = sos_token self.first_stage_key = first_stage_key self.cond_stage_key = cond_stage_key self.init_first_stage_from_ckpt(first_stage_config) self.init_cond_stage_from_ckpt(cond_stage_config) if permuter_config is None: permuter_config = {"target": "image_synthesis.taming.modules.transformer.permuter.Identity"} self.permuter = instantiate_from_config(config=permuter_config) self.transformer = instantiate_from_config(config=transformer_config) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) self.downsample_cond_size = downsample_cond_size self.pkeep = pkeep def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] for k in sd.keys(): for ik in ignore_keys: if k.startswith(ik): self.print("Deleting key {} from state_dict.".format(k)) del sd[k] self.load_state_dict(sd, strict=False) print(f"Restored from {path}") def init_first_stage_from_ckpt(self, config): model = instantiate_from_config(config) model = model.eval() model.train = disabled_train self.first_stage_model = model def init_cond_stage_from_ckpt(self, config): if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__" or self.be_unconditional: print(f"Using no cond stage. Assuming the training is intended to be unconditional. 
" f"Prepending {self.sos_token} as a sos token.") self.be_unconditional = True self.cond_stage_key = self.first_stage_key self.cond_stage_model = SOSProvider(self.sos_token) else: model = instantiate_from_config(config) model = model.eval() model.train = disabled_train self.cond_stage_model = model def forward(self, x, c): # one step to produce the logits _, z_indices = self.encode_to_z(x) _, c_indices = self.encode_to_c(c) if self.training and self.pkeep < 1.0: mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape, device=z_indices.device)) mask = mask.round().to(dtype=torch.int64) r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size) a_indices = mask*z_indices+(1-mask)*r_indices else: a_indices = z_indices cz_indices = torch.cat((c_indices, a_indices), dim=1) # target includes all sequence elements (no need to handle first one # differently because we are conditioning) target = z_indices # make the prediction logits, _ = self.transformer(cz_indices[:, :-1]) # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c) logits = logits[:, c_indices.shape[1]-1:] return logits, target def top_k_logits(self, logits, k): v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None, callback=lambda k: None): x = torch.cat((c,x),dim=1) block_size = self.transformer.get_block_size() assert not self.transformer.training if self.pkeep <= 0.0: # one pass suffices since input is pure noise anyway assert len(x.shape)==2 noise_shape = (x.shape[0], steps-1) #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x) noise = c.clone()[:,x.shape[1]-c.shape[1]:-1] x = torch.cat((x,noise),dim=1) logits, _ = self.transformer(x) # take all logits for now and scale by temp logits = logits / temperature # optionally crop probabilities to only the top k options if top_k is not None: logits = self.top_k_logits(logits, top_k) # apply softmax to convert to probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution or take the most likely if sample: shape = probs.shape probs = probs.reshape(shape[0]*shape[1],shape[2]) ix = torch.multinomial(probs, num_samples=1) probs = probs.reshape(shape[0],shape[1],shape[2]) ix = ix.reshape(shape[0],shape[1]) else: _, ix = torch.topk(probs, k=1, dim=-1) # cut off conditioning x = ix[:, c.shape[1]-1:] else: for k in range(steps): callback(k) assert x.size(1) <= block_size # make sure model can see conditioning x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed logits, _ = self.transformer(x_cond) # pluck the logits at the final step and scale by temperature logits = logits[:, -1, :] / temperature # optionally crop probabilities to only the top k options if top_k is not None: logits = self.top_k_logits(logits, top_k) # apply softmax to convert to probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution or take the most likely if sample: ix = torch.multinomial(probs, num_samples=1) else: _, ix = torch.topk(probs, k=1, dim=-1) # append to the sequence and continue x = torch.cat((x, ix), dim=1) # cut off conditioning x = x[:, c.shape[1]:] return x @torch.no_grad() def encode_to_z(self, x): quant_z, _, info = self.first_stage_model.encode(x) indices = info[2].view(quant_z.shape[0], -1) indices = self.permuter(indices) return quant_z, indices @torch.no_grad() def encode_to_c(self, c): if self.downsample_cond_size > -1: c 
= F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size)) quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c) if len(indices.shape) > 2: indices = indices.view(c.shape[0], -1) return quant_c, indices @torch.no_grad() def decode_to_img(self, index, zshape): index = self.permuter(index, reverse=True) bhwc = (zshape[0],zshape[2],zshape[3],zshape[1]) quant_z = self.first_stage_model.quantize.get_codebook_entry( index.reshape(-1), shape=bhwc) x = self.first_stage_model.decode(quant_z) return x @torch.no_grad() def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs): log = dict() N = 4 if lr_interface: x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8) else: x, c = self.get_xc(batch, N) x = x.to(device=self.device) c = c.to(device=self.device) quant_z, z_indices = self.encode_to_z(x) quant_c, c_indices = self.encode_to_c(c) # create a "half"" sample z_start_indices = z_indices[:,:z_indices.shape[1]//2] index_sample = self.sample(z_start_indices, c_indices, steps=z_indices.shape[1]-z_start_indices.shape[1], temperature=temperature if temperature is not None else 1.0, sample=True, top_k=top_k if top_k is not None else 100, callback=callback if callback is not None else lambda k: None) x_sample = self.decode_to_img(index_sample, quant_z.shape) # sample z_start_indices = z_indices[:, :0] index_sample = self.sample(z_start_indices, c_indices, steps=z_indices.shape[1], temperature=temperature if temperature is not None else 1.0, sample=True, top_k=top_k if top_k is not None else 100, callback=callback if callback is not None else lambda k: None) x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape) # det sample z_start_indices = z_indices[:, :0] index_sample = self.sample(z_start_indices, c_indices, steps=z_indices.shape[1], sample=False, callback=callback if callback is not None else lambda k: None) x_sample_det = self.decode_to_img(index_sample, quant_z.shape) # reconstruction x_rec = self.decode_to_img(z_indices, quant_z.shape) log["inputs"] = x log["reconstructions"] = x_rec if self.cond_stage_key != "image": cond_rec = self.cond_stage_model.decode(quant_c) if self.cond_stage_key == "segmentation": # get image from segmentation mask num_classes = cond_rec.shape[1] c = torch.argmax(c, dim=1, keepdim=True) c = F.one_hot(c, num_classes=num_classes) c = c.squeeze(1).permute(0, 3, 1, 2).float() c = self.cond_stage_model.to_rgb(c) cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True) cond_rec = F.one_hot(cond_rec, num_classes=num_classes) cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float() cond_rec = self.cond_stage_model.to_rgb(cond_rec) log["conditioning_rec"] = cond_rec log["conditioning"] = c log["samples_half"] = x_sample log["samples_nopix"] = x_sample_nopix log["samples_det"] = x_sample_det return log def get_input(self, key, batch): x = batch[key] if len(x.shape) == 3: x = x[..., None] if len(x.shape) == 4: x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format) if x.dtype == torch.double: x = x.float() return x def get_xc(self, batch, N=None): x = self.get_input(self.first_stage_key, batch) c = self.get_input(self.cond_stage_key, batch) if N is not None: x = x[:N] c = c[:N] return x, c def shared_step(self, batch, batch_idx): x, c = self.get_xc(batch) logits, target = self(x, c) loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1)) return loss def training_step(self, batch, batch_idx): loss = self.shared_step(batch, batch_idx) self.log("train/loss", 
loss, prog_bar=True, logger=True, on_step=True, on_epoch=True) return loss def validation_step(self, batch, batch_idx): loss = self.shared_step(batch, batch_idx) self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True) return loss def configure_optimizers(self): """ Following minGPT: This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. """ # separate out all parameters to those that will and won't experience regularizing weight decay decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.transformer.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # special case the position embedding parameter in the root GPT module as not decayed no_decay.add('pos_emb') # validate that we considered every parameter param_dict = {pn: p for pn, p in self.transformer.named_parameters()} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95)) return optimizer
15,049
42.75
127
py
VQ-Diffusion
VQ-Diffusion-main/running_command/run_train_ffhq.py
import os

string = "python train.py --name ffhq_train --config_file configs/ffhq.yaml --num_node 1 --tensorboard"

os.system(string)
135
18.428571
103
py
VQ-Diffusion
VQ-Diffusion-main/running_command/run_tune_coco.py
import os

string = "python train.py --name coco_tune --config_file configs/coco_tune.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/COCO_pretrained.pth"

os.system(string)
195
27
163
py
VQ-Diffusion
VQ-Diffusion-main/running_command/run_train_imagenet.py
import os

string = "python train.py --name imagenet_train --config_file configs/imagenet.yaml --num_node 1 --tensorboard"

os.system(string)
143
19.571429
111
py
VQ-Diffusion
VQ-Diffusion-main/running_command/run_train_coco.py
import os

string = "python train.py --name coco_train --config_file configs/coco.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/CC_pretrained.pth"

os.system(string)
189
26.142857
157
py
VQ-Diffusion
VQ-Diffusion-main/running_command/run_train_cub.py
import os string = "python train.py --name cub200_train --config_file configs/cub200.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/CC_pretrained.pth" os.system(string)
193
26.714286
161
py
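The five launcher scripts above differ only in the experiment name, the config file, and the optional --load_path passed to train.py. As an illustrative aside, the same commands could be issued from one parameterized helper; the sketch below does not exist in the VQ-Diffusion repository and simply mirrors the arguments used above, replacing os.system with subprocess.run and an argument list.

import subprocess

def launch(name, config, load_path=None, num_node=1):
    # Mirrors the flags used by the running_command scripts; train.py is assumed to be on PATHs' cwd.
    cmd = ["python", "train.py", "--name", name, "--config_file", config,
           "--num_node", str(num_node), "--tensorboard"]
    if load_path is not None:
        cmd += ["--load_path", load_path]
    subprocess.run(cmd, check=True)

# e.g. launch("coco_train", "configs/coco.yaml", load_path="OUTPUT/pretrained_model/CC_pretrained.pth")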
Reflect
Reflect-master/mnist_trainer.py
import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
from util.models import MODELS
from util.tasks import TASKS

FLAGS = flags.FLAGS
flags.DEFINE_string('exp_name', 'trial1', 'experiment directory')
flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm')
flags.DEFINE_string('model', 'lm_lstm', 'lm_lstm | lm_gpt2 | lm_gpt2_shared | lm_lstm_shared_emb | cl_gpt2_shared | cl_gpt2 | cl_lstm')
flags.DEFINE_string('model_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('train_config', 'radam_fast', 'radam_slow | radam_fast')
flags.DEFINE_integer('keep_checkpoint_every_n_hours', None, 'keep_checkpoint_every_n_hours passed to training manager')
flags.DEFINE_integer('batch_size', 16, 'batch_size')

hparams = flags.FLAGS


def run():
  gpus = tf.config.experimental.list_physical_devices('GPU')
  if gpus:
    # Currently, memory growth needs to be the same across GPUs
    try:
      for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
      print(e)

  strategy = tf.distribute.MirroredStrategy()

  log_dir = "logs"
  chkpt_dir = "tf_ckpts"

  # Create task
  with strategy.scope():
    task = TASKS[hparams.task](get_task_params())

    # Create the Model
    model_params = get_model_params(task, hparams.model, hparams.model_config)
    print("model_params: ", model_params.__dict__)
    model = MODELS[hparams.model](hparams=get_model_params(task, hparams.model, hparams.model_config))

    trainer_params = get_train_params(hparams.train_config)

    log_dir = os.path.join(log_dir, task.name,
                           model.model_name + "_" + str(hparams.model_config) + "_" + str(trainer_params.learning_rate) + "_" + hparams.exp_name)
    ckpt_dir = os.path.join(chkpt_dir, task.name,
                            model.model_name + "_" + str(hparams.model_config) + "_" + str(trainer_params.learning_rate) + "_" + hparams.exp_name)

    # Create task
    trainer = Trainer(hparams, strategy=strategy, task=task, model=model,
                      train_params=trainer_params, log_dir=log_dir, ckpt_dir=ckpt_dir)

    trainer.restore()
    trainer.train()


def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  run()


if __name__ == '__main__':
  app.run(main)
2,498
31.454545
153
py
Reflect
Reflect-master/keras_trainer.py
import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
from util.models import MODELS
from util.tasks import TASKS

FLAGS = flags.FLAGS
flags.DEFINE_string('exp_name', 'trial1', 'experiment directory')
flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm')
flags.DEFINE_string('model', 'lm_lstm', 'lm_lstm | lm_gpt2 | lm_gpt2_shared | lm_lstm_shared_emb | cl_gpt2_shared | cl_gpt2 | cl_lstm')
flags.DEFINE_string('model_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('train_config', 'radam_fast', 'radam_slow | radam_fast')
flags.DEFINE_integer('keep_checkpoint_every_n_hours', None, 'keep_checkpoint_every_n_hours passed to training manager')
flags.DEFINE_integer('batch_size', 64, 'batch_size')

hparams = flags.FLAGS


def run():
  gpus = tf.config.experimental.list_physical_devices('GPU')
  if gpus:
    # Currently, memory growth needs to be the same across GPUs
    try:
      for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
      print(e)

  log_dir = "logs"
  chkpt_dir = "tf_ckpts"

  strategy = tf.distribute.MirroredStrategy()

  # Create task
  with strategy.scope():
    task = TASKS[hparams.task](get_task_params(batch_size=hparams.batch_size,
                                               num_replicas_in_sync=strategy.num_replicas_in_sync))

    # Create the Model
    model_params = get_model_params(task, hparams.model, hparams.model_config)
    print("model_params: ", model_params.__dict__)
    cl_token = task.sentence_encoder().encode(constants.bos)
    model = MODELS[hparams.model](hparams=get_model_params(task, hparams.model, hparams.model_config), cl_token=cl_token)

    trainer_params = get_train_params(hparams.train_config)

    log_dir = os.path.join(log_dir, task.name,
                           model.model_name + "_" + str(hparams.model_config) + "_" + str(trainer_params.learning_rate) + "_" + hparams.exp_name)
    ckpt_dir = os.path.join(chkpt_dir, task.name,
                            model.model_name + "_" + str(hparams.model_config) + "_" + str(trainer_params.learning_rate) + "_" + hparams.exp_name)

    trainer = Trainer(hparams, strategy=strategy, task=task, model=model,
                      train_params=trainer_params, log_dir=log_dir, ckpt_dir=ckpt_dir)

    trainer.restore()
    trainer.train()


def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  run()


if __name__ == '__main__':
  app.run(main)
2,688
34.381579
153
py
Reflect
Reflect-master/util/text_util.py
from collections import Counter
import csv
import subprocess

from util import inflect
import pandas as pd
from statsmodels.stats.proportion import proportion_confint

infl_eng = inflect.engine()

dependency_fields = ['sentence', 'orig_sentence', 'pos_sentence',
                     'subj', 'verb', 'subj_pos', 'has_rel', 'has_nsubj',
                     'verb_pos', 'subj_index', 'verb_index', 'n_intervening',
                     'last_intervening', 'n_diff_intervening', 'distance',
                     'max_depth', 'all_nouns', 'nouns_up_to_verb']


def deps_to_tsv(deps, outfile):
    writer = csv.writer(open(outfile, 'w'), delimiter='\t')
    writer.writerow(dependency_fields)
    for dep in deps:
        writer.writerow([dep[key] for key in dependency_fields])


def deps_from_tsv(infile, limit=None):
    res = []
    for i, d in enumerate(csv.DictReader(open(infile), delimiter='\t')):
        if limit is not None and i >= limit:
            break
        res.append({x: int(y) if y.isdigit() else y for x, y in d.items()})
    return res


def zread(fname):
    p = subprocess.Popen(['gunzip', '-c', fname], stdout=subprocess.PIPE)
    for line in p.stdout:
        yield line
    p.wait()


def tokenize_blanks(fh):
    sent = []
    for line in fh:
        line = line.strip().split()
        if not line:
            if sent:
                yield sent
            sent = []
        else:
            sent.append(line)
    yield sent


def create_freq_dict(infile, outfile, minfreq=50):
    d = Counter()
    for i, line in enumerate(zread(infile)):
        stripped = line.strip()
        if stripped:
            s = stripped.split()
            d[s[1], s[3]] += 1
        if i % 1000000 == 0:
            print(i)

    outfile = open(outfile, 'w')  # was the Python 2 builtin file(); open() is the Python 3 equivalent
    for (w, pos), count in d.items():  # was the Python 2 iteritems()
        if count > minfreq:
            outfile.write('%s\t%s\t%d\n' % (w, pos, count))


def confint(row):
    n_errors = int(row['errorprob'] * row['count'])
    return proportion_confint(n_errors, row['count'])


def add_confints(df):
    df['minconf'] = df.apply(lambda row: confint(row)[0], axis=1)
    df['maxconf'] = df.apply(lambda row: confint(row)[1], axis=1)


def get_grouping(df, grouping_vars):
    funcs = {'correct': {'accuracy': 'mean', 'count': 'count'},
             'distance': {'mean_distance': 'mean'}}
    x = df.groupby(grouping_vars).aggregate(funcs)
    x.columns = x.columns.droplevel()
    x = x.reset_index()
    x['errorprob'] = 1 - x['accuracy']
    add_confints(x)
    return x


def gen_inflect_from_vocab(vocab_file, freq_threshold=1000):
    vbp = {}
    vbz = {}
    nn = {}
    nns = {}
    from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz}
    for line in open(vocab_file):
        if line.startswith(' '):  # empty string token
            continue
        word, pos, count = line.strip().split()
        count = int(count)
        if len(word) > 1 and pos in from_pos and count >= freq_threshold:
            from_pos[pos][word] = count

    verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'}
    for word, count in vbz.items():
        candidate = infl_eng.plural_verb(word)
        if candidate in vbp:
            verb_infl[candidate] = word
            verb_infl[word] = candidate

    noun_infl = {'NN': 'NNS', 'NNS': 'NN'}
    for word, count in nn.items():
        candidate = infl_eng.plural_noun(word)
        if candidate in nns:
            noun_infl[candidate] = word
            noun_infl[word] = candidate

    return verb_infl, noun_infl


def annotate_relpron(df):
    pd.options.mode.chained_assignment = None

    def f(x):
        blacklist = set(['NNP', 'PRP'])
        relprons = set(['WDT', 'WP', 'WRB', 'WP$'])
        vi = x['verb_index'] - 1
        words_in_dep = x['orig_sentence'].split()[x['subj_index']:vi]
        pos_in_dep = x['pos_sentence'].split()[x['subj_index']:vi]
        first_is_that = words_in_dep[:1] == ['that']
        return (bool(blacklist & set(pos_in_dep)),
                bool(relprons & set(pos_in_dep[:2])) | first_is_that,
                bool(relprons & set(pos_in_dep)) | first_is_that)

    df['blacklisted'], df['has_early_relpron'], df['has_relpron'] = \
        zip(*df.apply(f, axis=1))
    df['has_early_relpron'] = True

    def g(x):
        if x['has_rel'] and x['has_relpron'] and x['has_early_relpron']:
            return 'With relativizer'
        elif x['has_rel'] and not x['has_relpron']:
            return 'Without relativizer'
        elif not x['has_rel']:
            if x['has_relpron']:
                return 'Error'
            else:
                return 'No relative clause'
        else:
            return 'Error'

    df['condition'] = df.apply(g, axis=1)
    return df
4,707
30.178808
77
py
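The confint and add_confints helpers in text_util.py above attach confidence intervals (statsmodels' proportion_confint, normal approximation by default) to the per-group error probabilities produced by get_grouping. A minimal usage sketch on a hand-built frame follows; the counts and error probabilities are made-up illustration values, not results from the repository.

import pandas as pd
from statsmodels.stats.proportion import proportion_confint

df = pd.DataFrame({"count": [200, 50], "errorprob": [0.10, 0.30]})
df["minconf"], df["maxconf"] = zip(*[
    proportion_confint(int(row["errorprob"] * row["count"]), row["count"])
    for _, row in df.iterrows()
])
print(df)  # each row now carries the lower/upper bound of its error-rate interval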
Reflect
Reflect-master/util/constants.py
pad = '<pad>'
unk = '<unk>'
bos = '<bos>'
eos = '<eos>'

pad_idx = 0
unk_idx = 1
bos_idx = 2
eos_idx = 3

all = [pad, unk, bos, eos]
132
11.090909
26
py
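As a small illustration of how these special tokens are typically consumed, the sketch below wraps a toy id sequence with bos/eos and pads it to a fixed length. It assumes util/ is importable as in the trainer scripts above; the token ids other than the four indices defined in constants.py are hypothetical.

from util import constants

ids = [17, 42, 5]  # hypothetical token ids for one sentence
seq = [constants.bos_idx] + ids + [constants.eos_idx]
max_len = 8
padded = seq + [constants.pad_idx] * (max_len - len(seq))
# padded == [2, 17, 42, 5, 3, 0, 0, 0]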
Reflect
Reflect-master/util/inflect.py
''' inflect.py: correctly generate plurals, ordinals, indefinite articles; convert numbers to words Copyright (C) 2010 Paul Dyson Based upon the Perl module Lingua::EN::Inflect by Damian Conway. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. The original Perl module Lingua::EN::Inflect by Damian Conway is available from http://search.cpan.org/~dconway/ This module can be downloaded at http://pypi.python.org/pypi/inflect methods: classical inflect plural plural_noun plural_verb plural_adj singular_noun no num a an compare compare_nouns compare_verbs compare_adjs present_participle ordinal number_to_words join defnoun defverb defadj defa defan INFLECTIONS: classical inflect plural plural_noun plural_verb plural_adj singular_noun compare no num a an present_participle PLURALS: classical inflect plural plural_noun plural_verb plural_adj singular_noun no num compare compare_nouns compare_verbs compare_adjs COMPARISONS: classical compare compare_nouns compare_verbs compare_adjs ARTICLES: classical inflect num a an NUMERICAL: ordinal number_to_words USER_DEFINED: defnoun defverb defadj defa defan Exceptions: UnknownClassicalModeError BadNumValueError BadChunkingOptionError NumOutOfRangeError BadUserDefinedPatternError BadRcFileError BadGenderError ''' from re import match, search, subn, IGNORECASE, VERBOSE from re import split as splitre from re import error as reerror from re import sub as resub class UnknownClassicalModeError(Exception): pass class BadNumValueError(Exception): pass class BadChunkingOptionError(Exception): pass class NumOutOfRangeError(Exception): pass class BadUserDefinedPatternError(Exception): pass class BadRcFileError(Exception): pass class BadGenderError(Exception): pass __ver_major__ = 0 __ver_minor__ = 2 __ver_patch__ = 5 __ver_sub__ = "" __version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__, __ver_patch__, __ver_sub__) STDOUT_ON = False def print3(txt): if STDOUT_ON: print(txt) def enclose(s): return "(?:%s)" % s def joinstem(cutpoint=0, words=''): ''' join stem of each word in words into a string for regex each word is truncated at cutpoint cutpoint is usually negative indicating the number of letters to remove from the end of each word e.g. joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns (?:ephemer|ir|.*it) ''' return enclose('|'.join(w[:cutpoint] for w in words)) def bysize(words): ''' take a list of words and return a dict of sets sorted by word length e.g. 
ret[3]=set(['ant', 'cat', 'dog', 'pig']) ret[4]=set(['frog', 'goat']) ret[5]=set(['horse']) ret[8]=set(['elephant']) ''' ret = {} for w in words: if len(w) not in ret: ret[len(w)] = set() ret[len(w)].add(w) return ret def make_pl_si_lists(lst, plending, siendingsize, dojoinstem=True): ''' given a list of singular words: lst an ending to append to make the plural: plending the number of characters to remove from the singular before appending plending: siendingsize a flag whether to create a joinstem: dojoinstem return: a list of pluralised words: si_list (called si because this is what you need to look for to make the singular) the pluralised words as a dict of sets sorted by word length: si_bysize the singular words as a dict of sets sorted by word length: pl_bysize if dojoinstem is True: a regular expression that matches any of the stems: stem ''' if siendingsize is not None: siendingsize = -siendingsize si_list = [w[:siendingsize] + plending for w in lst] pl_bysize = bysize(lst) si_bysize = bysize(si_list) if dojoinstem: stem = joinstem(siendingsize, lst) return si_list, si_bysize, pl_bysize, stem else: return si_list, si_bysize, pl_bysize # 1. PLURALS pl_sb_irregular_s = { "corpus": "corpuses|corpora", "opus": "opuses|opera", "genus": "genera", "mythos": "mythoi", "penis": "penises|penes", "testis": "testes", "atlas": "atlases|atlantes", "yes": "yeses", } pl_sb_irregular = { "child": "children", "brother": "brothers|brethren", "loaf": "loaves", "hoof": "hoofs|hooves", "beef": "beefs|beeves", "thief": "thiefs|thieves", "money": "monies", "mongoose": "mongooses", "ox": "oxen", "cow": "cows|kine", "graffito": "graffiti", "octopus": "octopuses|octopodes", "genie": "genies|genii", "ganglion": "ganglions|ganglia", "trilby": "trilbys", "turf": "turfs|turves", "numen": "numina", "atman": "atmas", "occiput": "occiputs|occipita", "sabretooth": "sabretooths", "sabertooth": "sabertooths", "lowlife": "lowlifes", "flatfoot": "flatfoots", "tenderfoot": "tenderfoots", "romany": "romanies", "jerry": "jerries", "mary": "maries", "talouse": "talouses", "blouse": "blouses", "rom": "roma", "carmen": "carmina", } pl_sb_irregular.update(pl_sb_irregular_s) # pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys())) pl_sb_irregular_caps = { 'Romany': 'Romanies', 'Jerry': 'Jerrys', 'Mary': 'Marys', 'Rom': 'Roma', } pl_sb_irregular_compound = { "prima donna": "prima donnas|prime donne", } si_sb_irregular = dict([(v, k) for (k, v) in pl_sb_irregular.items()]) keys = list(si_sb_irregular.keys()) for k in keys: if '|' in k: k1, k2 = k.split('|') si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k] del si_sb_irregular[k] si_sb_irregular_caps = dict([(v, k) for (k, v) in pl_sb_irregular_caps.items()]) si_sb_irregular_compound = dict([(v, k) for (k, v) in pl_sb_irregular_compound.items()]) keys = list(si_sb_irregular_compound.keys()) for k in keys: if '|' in k: k1, k2 = k.split('|') si_sb_irregular_compound[k1] = si_sb_irregular_compound[k2] = si_sb_irregular_compound[k] del si_sb_irregular_compound[k] # si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys())) # Z's that don't double pl_sb_z_zes_list = ( "quartz", "topaz", ) pl_sb_z_zes_bysize = bysize(pl_sb_z_zes_list) pl_sb_ze_zes_list = ('snooze',) pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list) # CLASSICAL "..is" -> "..ides" pl_sb_C_is_ides_complete = [ # GENERAL WORDS... "ephemeris", "iris", "clitoris", "chrysalis", "epididymis", ] pl_sb_C_is_ides_endings = [ # INFLAMATIONS... 
"itis", ] pl_sb_C_is_ides = joinstem(-2, pl_sb_C_is_ides_complete + ['.*%s' % w for w in pl_sb_C_is_ides_endings]) pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings (si_sb_C_is_ides_list, si_sb_C_is_ides_bysize, pl_sb_C_is_ides_bysize) = make_pl_si_lists(pl_sb_C_is_ides_list, 'ides', 2, dojoinstem=False) # CLASSICAL "..a" -> "..ata" pl_sb_C_a_ata_list = ( "anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma", "gumma", "pragma", ) (si_sb_C_a_ata_list, si_sb_C_a_ata_bysize, pl_sb_C_a_ata_bysize, pl_sb_C_a_ata) = make_pl_si_lists(pl_sb_C_a_ata_list, 'ata', 1) # UNCONDITIONAL "..a" -> "..ae" pl_sb_U_a_ae_list = ( "alumna", "alga", "vertebra", "persona" ) (si_sb_U_a_ae_list, si_sb_U_a_ae_bysize, pl_sb_U_a_ae_bysize, pl_sb_U_a_ae) = make_pl_si_lists(pl_sb_U_a_ae_list, 'e', None) # CLASSICAL "..a" -> "..ae" pl_sb_C_a_ae_list = ( "amoeba", "antenna", "formula", "hyperbola", "medusa", "nebula", "parabola", "abscissa", "hydra", "nova", "lacuna", "aurora", "umbra", "flora", "fauna", ) (si_sb_C_a_ae_list, si_sb_C_a_ae_bysize, pl_sb_C_a_ae_bysize, pl_sb_C_a_ae) = make_pl_si_lists(pl_sb_C_a_ae_list, 'e', None) # CLASSICAL "..en" -> "..ina" pl_sb_C_en_ina_list = ( "stamen", "foramen", "lumen", ) (si_sb_C_en_ina_list, si_sb_C_en_ina_bysize, pl_sb_C_en_ina_bysize, pl_sb_C_en_ina) = make_pl_si_lists(pl_sb_C_en_ina_list, 'ina', 2) # UNCONDITIONAL "..um" -> "..a" pl_sb_U_um_a_list = ( "bacterium", "agendum", "desideratum", "erratum", "stratum", "datum", "ovum", "extremum", "candelabrum", ) (si_sb_U_um_a_list, si_sb_U_um_a_bysize, pl_sb_U_um_a_bysize, pl_sb_U_um_a) = make_pl_si_lists(pl_sb_U_um_a_list, 'a', 2) # CLASSICAL "..um" -> "..a" pl_sb_C_um_a_list = ( "maximum", "minimum", "momentum", "optimum", "quantum", "cranium", "curriculum", "dictum", "phylum", "aquarium", "compendium", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "memorandum", "millennium", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "medium", "vacuum", "velum", "consortium", "arboretum", ) (si_sb_C_um_a_list, si_sb_C_um_a_bysize, pl_sb_C_um_a_bysize, pl_sb_C_um_a) = make_pl_si_lists(pl_sb_C_um_a_list, 'a', 2) # UNCONDITIONAL "..us" -> "i" pl_sb_U_us_i_list = ( "alumnus", "alveolus", "bacillus", "bronchus", "locus", "nucleus", "stimulus", "meniscus", "sarcophagus", ) (si_sb_U_us_i_list, si_sb_U_us_i_bysize, pl_sb_U_us_i_bysize, pl_sb_U_us_i) = make_pl_si_lists(pl_sb_U_us_i_list, 'i', 2) # CLASSICAL "..us" -> "..i" pl_sb_C_us_i_list = ( "focus", "radius", "genius", "incubus", "succubus", "nimbus", "fungus", "nucleolus", "stylus", "torus", "umbilicus", "uterus", "hippopotamus", "cactus", ) (si_sb_C_us_i_list, si_sb_C_us_i_bysize, pl_sb_C_us_i_bysize, pl_sb_C_us_i) = make_pl_si_lists(pl_sb_C_us_i_list, 'i', 2) # CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS) pl_sb_C_us_us = ( "status", "apparatus", "prospectus", "sinus", "hiatus", "impetus", "plexus", ) pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us) # UNCONDITIONAL "..on" -> "a" pl_sb_U_on_a_list = ( "criterion", "perihelion", "aphelion", "phenomenon", "prolegomenon", "noumenon", "organon", "asyndeton", "hyperbaton", ) (si_sb_U_on_a_list, si_sb_U_on_a_bysize, pl_sb_U_on_a_bysize, pl_sb_U_on_a) = make_pl_si_lists(pl_sb_U_on_a_list, 'a', 2) # CLASSICAL "..on" -> "..a" pl_sb_C_on_a_list = ( "oxymoron", ) (si_sb_C_on_a_list, 
si_sb_C_on_a_bysize, pl_sb_C_on_a_bysize, pl_sb_C_on_a) = make_pl_si_lists(pl_sb_C_on_a_list, 'a', 2) # CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os") pl_sb_C_o_i = [ "solo", "soprano", "basso", "alto", "contralto", "tempo", "piano", "virtuoso", ] # list not tuple so can concat for pl_sb_U_o_os pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i) si_sb_C_o_i_bysize = bysize(['%si' % w[:-1] for w in pl_sb_C_o_i]) pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i) # ALWAYS "..o" -> "..os" pl_sb_U_o_os_complete = set(( "ado", "ISO", "NATO", "NCO", "NGO", "oto", )) si_sb_U_o_os_complete = set('%ss' % w for w in pl_sb_U_o_os_complete) pl_sb_U_o_os_endings = [ "aficionado", "aggro", "albino", "allegro", "ammo", "Antananarivo", "archipelago", "armadillo", "auto", "avocado", "Bamako", "Barquisimeto", "bimbo", "bingo", "Biro", "bolero", "Bolzano", "bongo", "Boto", "burro", "Cairo", "canto", "cappuccino", "casino", "cello", "Chicago", "Chimango", "cilantro", "cochito", "coco", "Colombo", "Colorado", "commando", "concertino", "contango", "credo", "crescendo", "cyano", "demo", "ditto", "Draco", "dynamo", "embryo", "Esperanto", "espresso", "euro", "falsetto", "Faro", "fiasco", "Filipino", "flamenco", "furioso", "generalissimo", "Gestapo", "ghetto", "gigolo", "gizmo", "Greensboro", "gringo", "Guaiabero", "guano", "gumbo", "gyro", "hairdo", "hippo", "Idaho", "impetigo", "inferno", "info", "intermezzo", "intertrigo", "Iquico", "jumbo", "junto", "Kakapo", "kilo", "Kinkimavo", "Kokako", "Kosovo", "Lesotho", "libero", "libido", "libretto", "lido", "Lilo", "limbo", "limo", "lineno", "lingo", "lino", "livedo", "loco", "logo", "lumbago", "macho", "macro", "mafioso", "magneto", "magnifico", "Majuro", "Malabo", "manifesto", "Maputo", "Maracaibo", "medico", "memo", "metro", "Mexico", "micro", "Milano", "Monaco", "mono", "Montenegro", "Morocco", "Muqdisho", "myo", "neutrino", "Ningbo", "octavo", "oregano", "Orinoco", "Orlando", "Oslo", "panto", "Paramaribo", "Pardusco", "pedalo", "photo", "pimento", "pinto", "pleco", "Pluto", "pogo", "polo", "poncho", "Porto-Novo", "Porto", "pro", "psycho", "pueblo", "quarto", "Quito", "rhino", "risotto", "rococo", "rondo", "Sacramento", "saddo", "sago", "salvo", "Santiago", "Sapporo", "Sarajevo", "scherzando", "scherzo", "silo", "sirocco", "sombrero", "staccato", "sterno", "stucco", "stylo", "sumo", "Taiko", "techno", "terrazzo", "testudo", "timpano", "tiro", "tobacco", "Togo", "Tokyo", "torero", "Torino", "Toronto", "torso", "tremolo", "typo", "tyro", "ufo", "UNESCO", "vaquero", "vermicello", "verso", "vibrato", "violoncello", "Virgo", "weirdo", "WHO", "WTO", "Yamoussoukro", "yo-yo", "zero", "Zibo", ] + pl_sb_C_o_i pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings) si_sb_U_o_os_bysize = bysize(['%ss' % w for w in pl_sb_U_o_os_endings]) # UNCONDITIONAL "..ch" -> "..chs" pl_sb_U_ch_chs_list = ( "czech", "eunuch", "stomach" ) (si_sb_U_ch_chs_list, si_sb_U_ch_chs_bysize, pl_sb_U_ch_chs_bysize, pl_sb_U_ch_chs) = make_pl_si_lists(pl_sb_U_ch_chs_list, 's', None) # UNCONDITIONAL "..[ei]x" -> "..ices" pl_sb_U_ex_ices_list = ( "codex", "murex", "silex", ) (si_sb_U_ex_ices_list, si_sb_U_ex_ices_bysize, pl_sb_U_ex_ices_bysize, pl_sb_U_ex_ices) = make_pl_si_lists(pl_sb_U_ex_ices_list, 'ices', 2) pl_sb_U_ix_ices_list = ( "radix", "helix", ) (si_sb_U_ix_ices_list, si_sb_U_ix_ices_bysize, pl_sb_U_ix_ices_bysize, pl_sb_U_ix_ices) = make_pl_si_lists(pl_sb_U_ix_ices_list, 'ices', 2) # CLASSICAL "..[ei]x" -> "..ices" pl_sb_C_ex_ices_list = ( "vortex", "vertex", "cortex", "latex", "pontifex", "apex", "index", 
"simplex", ) (si_sb_C_ex_ices_list, si_sb_C_ex_ices_bysize, pl_sb_C_ex_ices_bysize, pl_sb_C_ex_ices) = make_pl_si_lists(pl_sb_C_ex_ices_list, 'ices', 2) pl_sb_C_ix_ices_list = ( "appendix", ) (si_sb_C_ix_ices_list, si_sb_C_ix_ices_bysize, pl_sb_C_ix_ices_bysize, pl_sb_C_ix_ices) = make_pl_si_lists(pl_sb_C_ix_ices_list, 'ices', 2) # ARABIC: ".." -> "..i" pl_sb_C_i_list = ( "afrit", "afreet", "efreet", ) (si_sb_C_i_list, si_sb_C_i_bysize, pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists(pl_sb_C_i_list, 'i', None) # HEBREW: ".." -> "..im" pl_sb_C_im_list = ( "goy", "seraph", "cherub", ) (si_sb_C_im_list, si_sb_C_im_bysize, pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists(pl_sb_C_im_list, 'im', None) # UNCONDITIONAL "..man" -> "..mans" pl_sb_U_man_mans_list = """ ataman caiman cayman ceriman desman dolman farman harman hetman human leman ottoman shaman talisman """.split() pl_sb_U_man_mans_caps_list = """ Alabaman Bahaman Burman German Hiroshiman Liman Nakayaman Norman Oklahoman Panaman Roman Selman Sonaman Tacoman Yakiman Yokohaman Yuman """.split() (si_sb_U_man_mans_list, si_sb_U_man_mans_bysize, pl_sb_U_man_mans_bysize) = make_pl_si_lists(pl_sb_U_man_mans_list, 's', None, dojoinstem=False) (si_sb_U_man_mans_caps_list, si_sb_U_man_mans_caps_bysize, pl_sb_U_man_mans_caps_bysize) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, 's', None, dojoinstem=False) pl_sb_uninflected_s_complete = [ # PAIRS OR GROUPS SUBSUMED TO A SINGULAR... "breeches", "britches", "pajamas", "pyjamas", "clippers", "gallows", "hijinks", "headquarters", "pliers", "scissors", "testes", "herpes", "pincers", "shears", "proceedings", "trousers", # UNASSIMILATED LATIN 4th DECLENSION "cantus", "coitus", "nexus", # RECENT IMPORTS... "contretemps", "corps", "debris", "siemens", # DISEASES "mumps", # MISCELLANEOUS OTHERS... "diabetes", "jackanapes", "series", "species", "subspecies", "rabies", "chassis", "innings", "news", "mews", "haggis", ] pl_sb_uninflected_s_endings = [ # RECENT IMPORTS... 
"ois", # DISEASES "measles", ] pl_sb_uninflected_s = pl_sb_uninflected_s_complete + ['.*%s' % w for w in pl_sb_uninflected_s_endings] pl_sb_uninflected_herd = ( # DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION "wildebeest", "swine", "eland", "bison", "buffalo", "elk", "rhinoceros", 'zucchini', 'caribou', 'dace', 'grouse', 'guinea fowl', 'guinea-fowl', 'haddock', 'hake', 'halibut', 'herring', 'mackerel', 'pickerel', 'pike', 'roe', 'seed', 'shad', 'snipe', 'teal', 'turbot', 'water fowl', 'water-fowl', ) pl_sb_uninflected_complete = [ # SOME FISH AND HERD ANIMALS "tuna", "salmon", "mackerel", "trout", "bream", "sea-bass", "sea bass", "carp", "cod", "flounder", "whiting", "moose", # OTHER ODDITIES "graffiti", "djinn", 'samuri', 'offspring', 'pence', 'quid', 'hertz', ] + pl_sb_uninflected_s_complete # SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) pl_sb_uninflected_caps = [ # ALL NATIONALS ENDING IN -ese "Portuguese", "Amoyese", "Borghese", "Congoese", "Faroese", "Foochowese", "Genevese", "Genoese", "Gilbertese", "Hottentotese", "Kiplingese", "Kongoese", "Lucchese", "Maltese", "Nankingese", "Niasese", "Pekingese", "Piedmontese", "Pistoiese", "Sarawakese", "Shavese", "Vermontese", "Wenchowese", "Yengeese", ] pl_sb_uninflected_endings = [ # SOME FISH AND HERD ANIMALS "fish", "deer", "sheep", # ALL NATIONALS ENDING IN -ese "nese", "rese", "lese", "mese", # DISEASES "pox", # OTHER ODDITIES 'craft', ] + pl_sb_uninflected_s_endings # SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings) # SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es) pl_sb_singular_s_complete = [ "acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "bronchitis", "bursitis", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "eyas", "gas", "glottis", "hubris", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis", ] + pl_sb_C_is_ides_complete pl_sb_singular_s_endings = [ "ss", "us", ] + pl_sb_C_is_ides_endings pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings) si_sb_singular_s_complete = ['%ses' % w for w in pl_sb_singular_s_complete] si_sb_singular_s_endings = ['%ses' % w for w in pl_sb_singular_s_endings] si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings) pl_sb_singular_s_es = [ "[A-Z].*es", ] pl_sb_singular_s = enclose('|'.join(pl_sb_singular_s_complete + ['.*%s' % w for w in pl_sb_singular_s_endings] + pl_sb_singular_s_es)) # PLURALS ENDING IN uses -> use si_sb_ois_oi_case = ( 'Bolshois', 'Hanois' ) si_sb_uses_use_case = ( 'Betelgeuses', 'Duses', 'Meuses', 'Syracuses', 'Toulouses', ) si_sb_uses_use = ( 'abuses', 'applauses', 'blouses', 'carouses', 'causes', 'chartreuses', 'clauses', 'contuses', 'douses', 'excuses', 'fuses', 'grouses', 'hypotenuses', 'masseuses', 'menopauses', 'misuses', 'muses', 'overuses', 'pauses', 'peruses', 'profuses', 'recluses', 'reuses', 'ruses', 'souses', 'spouses', 'suffuses', 'transfuses', 'uses', ) si_sb_ies_ie_case = ( 'Addies', 'Aggies', 'Allies', 'Amies', 'Angies', 'Annies', 'Annmaries', 'Archies', 'Arties', 'Aussies', 'Barbies', 'Barries', 'Basies', 'Bennies', 'Bernies', 'Berties', 'Bessies', 'Betties', 'Billies', 'Blondies', 'Bobbies', 'Bonnies', 'Bowies', 'Brandies', 'Bries', 'Brownies', 'Callies', 'Carnegies', 'Carries', 'Cassies', 'Charlies', 'Cheries', 'Christies', 'Connies', 'Curies', 'Dannies', 'Debbies', 'Dixies', 'Dollies', 'Donnies', 'Drambuies', 'Eddies', 'Effies', 
'Ellies', 'Elsies', 'Eries', 'Ernies', 'Essies', 'Eugenies', 'Fannies', 'Flossies', 'Frankies', 'Freddies', 'Gillespies', 'Goldies', 'Gracies', 'Guthries', 'Hallies', 'Hatties', 'Hetties', 'Hollies', 'Jackies', 'Jamies', 'Janies', 'Jannies', 'Jeanies', 'Jeannies', 'Jennies', 'Jessies', 'Jimmies', 'Jodies', 'Johnies', 'Johnnies', 'Josies', 'Julies', 'Kalgoorlies', 'Kathies', 'Katies', 'Kellies', 'Kewpies', 'Kristies', 'Laramies', 'Lassies', 'Lauries', 'Leslies', 'Lessies', 'Lillies', 'Lizzies', 'Lonnies', 'Lories', 'Lorries', 'Lotties', 'Louies', 'Mackenzies', 'Maggies', 'Maisies', 'Mamies', 'Marcies', 'Margies', 'Maries', 'Marjories', 'Matties', 'McKenzies', 'Melanies', 'Mickies', 'Millies', 'Minnies', 'Mollies', 'Mounties', 'Nannies', 'Natalies', 'Nellies', 'Netties', 'Ollies', 'Ozzies', 'Pearlies', 'Pottawatomies', 'Reggies', 'Richies', 'Rickies', 'Robbies', 'Ronnies', 'Rosalies', 'Rosemaries', 'Rosies', 'Roxies', 'Rushdies', 'Ruthies', 'Sadies', 'Sallies', 'Sammies', 'Scotties', 'Selassies', 'Sherries', 'Sophies', 'Stacies', 'Stefanies', 'Stephanies', 'Stevies', 'Susies', 'Sylvies', 'Tammies', 'Terries', 'Tessies', 'Tommies', 'Tracies', 'Trekkies', 'Valaries', 'Valeries', 'Valkyries', 'Vickies', 'Virgies', 'Willies', 'Winnies', 'Wylies', 'Yorkies', ) si_sb_ies_ie = ( 'aeries', 'baggies', 'belies', 'biggies', 'birdies', 'bogies', 'bonnies', 'boogies', 'bookies', 'bourgeoisies', 'brownies', 'budgies', 'caddies', 'calories', 'camaraderies', 'cockamamies', 'collies', 'cookies', 'coolies', 'cooties', 'coteries', 'crappies', 'curies', 'cutesies', 'dogies', 'eyrie', 'floozies', 'footsies', 'freebies', 'genies', 'goalies', 'groupies', 'hies', 'jalousies', 'junkies', 'kiddies', 'laddies', 'lassies', 'lies', 'lingeries', 'magpies', 'menageries', 'mommies', 'movies', 'neckties', 'newbies', 'nighties', 'oldies', 'organdies', 'overlies', 'pies', 'pinkies', 'pixies', 'potpies', 'prairies', 'quickies', 'reveries', 'rookies', 'rotisseries', 'softies', 'sorties', 'species', 'stymies', 'sweeties', 'ties', 'underlies', 'unties', 'veggies', 'vies', 'yuppies', 'zombies', ) si_sb_oes_oe_case = ( 'Chloes', 'Crusoes', 'Defoes', 'Faeroes', 'Ivanhoes', 'Joes', 'McEnroes', 'Moes', 'Monroes', 'Noes', 'Poes', 'Roscoes', 'Tahoes', 'Tippecanoes', 'Zoes', ) si_sb_oes_oe = ( 'aloes', 'backhoes', 'canoes', 'does', 'floes', 'foes', 'hoes', 'mistletoes', 'oboes', 'pekoes', 'roes', 'sloes', 'throes', 'tiptoes', 'toes', 'woes', ) si_sb_z_zes = ( "quartzes", "topazes", ) si_sb_zzes_zz = ( 'buzzes', 'fizzes', 'frizzes', 'razzes' ) si_sb_ches_che_case = ( 'Andromaches', 'Apaches', 'Blanches', 'Comanches', 'Nietzsches', 'Porsches', 'Roches', ) si_sb_ches_che = ( 'aches', 'avalanches', 'backaches', 'bellyaches', 'caches', 'cloches', 'creches', 'douches', 'earaches', 'fiches', 'headaches', 'heartaches', 'microfiches', 'niches', 'pastiches', 'psyches', 'quiches', 'stomachaches', 'toothaches', ) si_sb_xes_xe = ( 'annexes', 'axes', 'deluxes', 'pickaxes', ) si_sb_sses_sse_case = ( 'Hesses', 'Jesses', 'Larousses', 'Matisses', ) si_sb_sses_sse = ( 'bouillabaisses', 'crevasses', 'demitasses', 'impasses', 'mousses', 'posses', ) si_sb_ves_ve_case = ( # *[nwl]ives -> [nwl]live 'Clives', 'Palmolives', ) si_sb_ves_ve = ( # *[^d]eaves -> eave 'interweaves', 'weaves', # *[nwl]ives -> [nwl]live 'olives', # *[eoa]lves -> [eoa]lve 'bivalves', 'dissolves', 'resolves', 'salves', 'twelves', 'valves', ) plverb_special_s = enclose('|'.join( [pl_sb_singular_s] + pl_sb_uninflected_s + list(pl_sb_irregular_s.keys()) + [ '(.*[csx])is', '(.*)ceps', 
'[A-Z].*s', ] )) pl_sb_postfix_adj = { 'general': ['(?!major|lieutenant|brigadier|adjutant|.*star)\S+'], 'martial': ['court'], } for k in list(pl_sb_postfix_adj.keys()): pl_sb_postfix_adj[k] = enclose( enclose('|'.join(pl_sb_postfix_adj[k])) + "(?=(?:-|\\s+)%s)" % k) pl_sb_postfix_adj_stems = '(' + '|'.join(list(pl_sb_postfix_adj.values())) + ')(.*)' # PLURAL WORDS ENDING IS es GO TO SINGULAR is si_sb_es_is = ( 'amanuenses', 'amniocenteses', 'analyses', 'antitheses', 'apotheoses', 'arterioscleroses', 'atheroscleroses', 'axes', # 'bases', # bases -> basis 'catalyses', 'catharses', 'chasses', 'cirrhoses', 'cocces', 'crises', 'diagnoses', 'dialyses', 'diereses', 'electrolyses', 'emphases', 'exegeses', 'geneses', 'halitoses', 'hydrolyses', 'hypnoses', 'hypotheses', 'hystereses', 'metamorphoses', 'metastases', 'misdiagnoses', 'mitoses', 'mononucleoses', 'narcoses', 'necroses', 'nemeses', 'neuroses', 'oases', 'osmoses', 'osteoporoses', 'paralyses', 'parentheses', 'parthenogeneses', 'periphrases', 'photosyntheses', 'probosces', 'prognoses', 'prophylaxes', 'prostheses', 'preces', 'psoriases', 'psychoanalyses', 'psychokineses', 'psychoses', 'scleroses', 'scolioses', 'sepses', 'silicoses', 'symbioses', 'synopses', 'syntheses', 'taxes', 'telekineses', 'theses', 'thromboses', 'tuberculoses', 'urinalyses', ) pl_prep_list = """ about above across after among around at athwart before behind below beneath beside besides between betwixt beyond but by during except for from in into near of off on onto out over since till to under until unto upon with""".split() pl_prep_list_da = pl_prep_list + ['de', 'du', 'da'] pl_prep_bysize = bysize(pl_prep_list_da) pl_prep = enclose('|'.join(pl_prep_list_da)) pl_sb_prep_dual_compound = r'(.*?)((?:-|\s+)(?:' + pl_prep + r')(?:-|\s+))a(?:-|\s+)(.*)' singular_pronoun_genders = set(['neuter', 'feminine', 'masculine', 'gender-neutral', 'feminine or masculine', 'masculine or feminine']) pl_pron_nom = { # NOMINATIVE REFLEXIVE "i": "we", "myself": "ourselves", "you": "you", "yourself": "yourselves", "she": "they", "herself": "themselves", "he": "they", "himself": "themselves", "it": "they", "itself": "themselves", "they": "they", "themself": "themselves", # POSSESSIVE "mine": "ours", "yours": "yours", "hers": "theirs", "his": "theirs", "its": "theirs", "theirs": "theirs", } si_pron = {} si_pron['nom'] = dict([(v, k) for (k, v) in pl_pron_nom.items()]) si_pron['nom']['we'] = 'I' pl_pron_acc = { # ACCUSATIVE REFLEXIVE "me": "us", "myself": "ourselves", "you": "you", "yourself": "yourselves", "her": "them", "herself": "themselves", "him": "them", "himself": "themselves", "it": "them", "itself": "themselves", "them": "them", "themself": "themselves", } pl_pron_acc_keys = enclose('|'.join(list(pl_pron_acc.keys()))) pl_pron_acc_keys_bysize = bysize(list(pl_pron_acc.keys())) si_pron['acc'] = dict([(v, k) for (k, v) in pl_pron_acc.items()]) for thecase, plur, gend, sing in ( ('nom', 'they', 'neuter', 'it'), ('nom', 'they', 'feminine', 'she'), ('nom', 'they', 'masculine', 'he'), ('nom', 'they', 'gender-neutral', 'they'), ('nom', 'they', 'feminine or masculine', 'she or he'), ('nom', 'they', 'masculine or feminine', 'he or she'), ('nom', 'themselves', 'neuter', 'itself'), ('nom', 'themselves', 'feminine', 'herself'), ('nom', 'themselves', 'masculine', 'himself'), ('nom', 'themselves', 'gender-neutral', 'themself'), ('nom', 'themselves', 'feminine or masculine', 'herself or himself'), ('nom', 'themselves', 'masculine or feminine', 'himself or herself'), ('nom', 'theirs', 'neuter', 'its'), 
('nom', 'theirs', 'feminine', 'hers'), ('nom', 'theirs', 'masculine', 'his'), ('nom', 'theirs', 'gender-neutral', 'theirs'), ('nom', 'theirs', 'feminine or masculine', 'hers or his'), ('nom', 'theirs', 'masculine or feminine', 'his or hers'), ('acc', 'them', 'neuter', 'it'), ('acc', 'them', 'feminine', 'her'), ('acc', 'them', 'masculine', 'him'), ('acc', 'them', 'gender-neutral', 'them'), ('acc', 'them', 'feminine or masculine', 'her or him'), ('acc', 'them', 'masculine or feminine', 'him or her'), ('acc', 'themselves', 'neuter', 'itself'), ('acc', 'themselves', 'feminine', 'herself'), ('acc', 'themselves', 'masculine', 'himself'), ('acc', 'themselves', 'gender-neutral', 'themself'), ('acc', 'themselves', 'feminine or masculine', 'herself or himself'), ('acc', 'themselves', 'masculine or feminine', 'himself or herself'), ): try: si_pron[thecase][plur][gend] = sing except TypeError: si_pron[thecase][plur] = {} si_pron[thecase][plur][gend] = sing si_pron_acc_keys = enclose('|'.join(list(si_pron['acc'].keys()))) si_pron_acc_keys_bysize = bysize(list(si_pron['acc'].keys())) def get_si_pron(thecase, word, gender): try: sing = si_pron[thecase][word] except KeyError: raise # not a pronoun try: return sing[gender] # has several types due to gender except TypeError: return sing # answer independent of gender plverb_irregular_pres = { # 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR # 3RD PERS. (INDET.) "am": "are", "are": "are", "is": "are", "was": "were", "were": "were", "was": "were", "have": "have", "have": "have", "has": "have", "do": "do", "do": "do", "does": "do", } plverb_ambiguous_pres = { # 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR # 3RD PERS. (INDET.) "act": "act", "act": "act", "acts": "act", "blame": "blame", "blame": "blame", "blames": "blame", "can": "can", "can": "can", "can": "can", "must": "must", "must": "must", "must": "must", "fly": "fly", "fly": "fly", "flies": "fly", "copy": "copy", "copy": "copy", "copies": "copy", "drink": "drink", "drink": "drink", "drinks": "drink", "fight": "fight", "fight": "fight", "fights": "fight", "fire": "fire", "fire": "fire", "fires": "fire", "like": "like", "like": "like", "likes": "like", "look": "look", "look": "look", "looks": "look", "make": "make", "make": "make", "makes": "make", "reach": "reach", "reach": "reach", "reaches": "reach", "run": "run", "run": "run", "runs": "run", "sink": "sink", "sink": "sink", "sinks": "sink", "sleep": "sleep", "sleep": "sleep", "sleeps": "sleep", "view": "view", "view": "view", "views": "view", } plverb_ambiguous_pres_keys = enclose('|'.join(list(plverb_ambiguous_pres.keys()))) plverb_irregular_non_pres = ( "did", "had", "ate", "made", "put", "spent", "fought", "sank", "gave", "sought", "shall", "could", "ought", "should", ) plverb_ambiguous_non_pres = enclose('|'.join(( "thought", "saw", "bent", "will", "might", "cut", ))) # "..oes" -> "..oe" (the rest are "..oes" -> "o") pl_v_oes_oe = ('canoes', 'floes', 'oboes', 'roes', 'throes', 'woes') pl_v_oes_oe_endings_size4 = ('hoes', 'toes') pl_v_oes_oe_endings_size5 = ('shoes') pl_count_zero = ( "0", "no", "zero", "nil" ) pl_count_one = ( "1", "a", "an", "one", "each", "every", "this", "that", ) pl_adj_special = { "a": "some", "an": "some", "this": "these", "that": "those", } pl_adj_special_keys = enclose('|'.join(list(pl_adj_special.keys()))) pl_adj_poss = { "my": "our", "your": "your", "its": "their", "her": "their", "his": "their", "their": "their", } pl_adj_poss_keys = enclose('|'.join(list(pl_adj_poss.keys()))) # 2. 
INDEFINITE ARTICLES # THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND" # CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY # TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!) A_abbrev = r""" (?! FJO | [HLMNS]Y. | RY[EO] | SQU | ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU]) [FHLMNRSX][A-Z] """ # THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINING WITH A # 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE # IMPLIES AN ABBREVIATION. A_y_cons = 'y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)' # EXCEPTIONS TO EXCEPTIONS A_explicit_a = enclose('|'.join(( "unabomber", "unanimous", "US", ))) A_explicit_an = enclose('|'.join(( "euler", "hour(?!i)", "heir", "honest", "hono[ur]", "mpeg", ))) A_ordinal_an = enclose('|'.join(( "[aefhilmnorsx]-?th", ))) A_ordinal_a = enclose('|'.join(( "[bcdgjkpqtuvwyz]-?th", ))) # NUMERICAL INFLECTIONS nth = { 0: 'th', 1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', 6: 'th', 7: 'th', 8: 'th', 9: 'th', 11: 'th', 12: 'th', 13: 'th', } ordinal = dict(ty='tieth', one='first', two='second', three='third', five='fifth', eight='eighth', nine='ninth', twelve='twelfth') ordinal_suff = '|'.join(list(ordinal.keys())) # NUMBERS unit = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine'] teen = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'] ten = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety'] mill = [' ', ' thousand', ' million', ' billion', ' trillion', ' quadrillion', ' quintillion', ' sextillion', ' septillion', ' octillion', ' nonillion', ' decillion'] # SUPPORT CLASSICAL PLURALIZATIONS def_classical = dict( all=False, zero=False, herd=False, names=True, persons=False, ancient=False, ) all_classical = dict((k, True) for k in list(def_classical.keys())) no_classical = dict((k, False) for k in list(def_classical.keys())) # TODO: .inflectrc file does not work # can't just execute methods from another file like this # for rcfile in (pathjoin(dirname(__file__), '.inflectrc'), # expanduser(pathjoin(('~'), '.inflectrc'))): # if isfile(rcfile): # try: # execfile(rcfile) # except: # print3("\nBad .inflectrc file (%s):\n" % rcfile) # raise BadRcFileError class engine: def __init__(self): self.classical_dict = def_classical.copy() self.persistent_count = None self.mill_count = 0 self.pl_sb_user_defined = [] self.pl_v_user_defined = [] self.pl_adj_user_defined = [] self.si_sb_user_defined = [] self.A_a_user_defined = [] self.thegender = 'neuter' deprecated_methods = dict(pl='plural', plnoun='plural_noun', plverb='plural_verb', pladj='plural_adj', sinoun='single_noun', prespart='present_participle', numwords='number_to_words', plequal='compare', plnounequal='compare_nouns', plverbequal='compare_verbs', pladjequal='compare_adjs', wordlist='join', ) def __getattr__(self, meth): if meth in self.deprecated_methods: print3('%s() deprecated, use %s()' % (meth, self.deprecated_methods[meth])) raise DeprecationWarning raise AttributeError def defnoun(self, singular, plural): ''' Set the noun plural of singular to plural. ''' self.checkpat(singular) self.checkpatplural(plural) self.pl_sb_user_defined.extend((singular, plural)) self.si_sb_user_defined.extend((plural, singular)) return 1 def defverb(self, s1, p1, s2, p2, s3, p3): ''' Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively. Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb. 
''' self.checkpat(s1) self.checkpat(s2) self.checkpat(s3) self.checkpatplural(p1) self.checkpatplural(p2) self.checkpatplural(p3) self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3)) return 1 def defadj(self, singular, plural): ''' Set the adjective plural of singular to plural. ''' self.checkpat(singular) self.checkpatplural(plural) self.pl_adj_user_defined.extend((singular, plural)) return 1 def defa(self, pattern): ''' Define the indefinate article as 'a' for words matching pattern. ''' self.checkpat(pattern) self.A_a_user_defined.extend((pattern, 'a')) return 1 def defan(self, pattern): ''' Define the indefinate article as 'an' for words matching pattern. ''' self.checkpat(pattern) self.A_a_user_defined.extend((pattern, 'an')) return 1 def checkpat(self, pattern): ''' check for errors in a regex pattern ''' if pattern is None: return try: match(pattern, '') except reerror: print3("\nBad user-defined singular pattern:\n\t%s\n" % pattern) raise BadUserDefinedPatternError def checkpatplural(self, pattern): ''' check for errors in a regex replace pattern ''' return # can't find a pattern that doesn't pass the following test: # if pattern is None: # return # try: # resub('', pattern, '') # except reerror: # print3("\nBad user-defined plural pattern:\n\t%s\n" % pattern) # raise BadUserDefinedPatternError def ud_match(self, word, wordlist): for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements mo = search(r'^%s$' % wordlist[i], word, IGNORECASE) if mo: if wordlist[i + 1] is None: return None pl = resub(r'\$(\d+)', r'\\1', wordlist[i + 1]) # change $n to \n for expand return mo.expand(pl) return None def classical(self, **kwargs): """ turn classical mode on and off for various categories turn on all classical modes: classical() classical(all=True) turn on or off specific claassical modes: e.g. classical(herd=True) classical(names=False) By default all classical modes are off except names. unknown value in args or key in kwargs rasies exception: UnknownClasicalModeError """ classical_mode = list(def_classical.keys()) if not kwargs: self.classical_dict = all_classical.copy() return if 'all' in kwargs: if kwargs['all']: self.classical_dict = all_classical.copy() else: self.classical_dict = no_classical.copy() for k, v in list(kwargs.items()): if k in classical_mode: self.classical_dict[k] = v else: raise UnknownClassicalModeError def num(self, count=None, show=None): # (;$count,$show) ''' Set the number to be used in other method calls. Returns count. Set show to False to return '' instead. 
''' if count is not None: try: self.persistent_count = int(count) except ValueError: raise BadNumValueError if (show is None) or show: return str(count) else: self.persistent_count = None return '' def gender(self, gender): ''' set the gender for the singular of plural pronouns can be one of: 'neuter' ('they' -> 'it') 'feminine' ('they' -> 'she') 'masculine' ('they' -> 'he') 'gender-neutral' ('they' -> 'they') 'feminine or masculine' ('they' -> 'she or he') 'masculine or feminine' ('they' -> 'he or she') ''' if gender in singular_pronoun_genders: self.thegender = gender else: raise BadGenderError def nummo(self, matchobject): ''' num but take a matchobject use groups 1 and 2 in matchobject ''' return self.num(matchobject.group(1), matchobject.group(2)) def plmo(self, matchobject): ''' plural but take a matchobject use groups 1 and 3 in matchobject ''' return self.plural(matchobject.group(1), matchobject.group(3)) def plnounmo(self, matchobject): ''' plural_noun but take a matchobject use groups 1 and 3 in matchobject ''' return self.plural_noun(matchobject.group(1), matchobject.group(3)) def plverbmo(self, matchobject): ''' plural_verb but take a matchobject use groups 1 and 3 in matchobject ''' return self.plural_verb(matchobject.group(1), matchobject.group(3)) def pladjmo(self, matchobject): ''' plural_adj but take a matchobject use groups 1 and 3 in matchobject ''' return self.plural_adj(matchobject.group(1), matchobject.group(3)) def sinounmo(self, matchobject): ''' singular_noun but take a matchobject use groups 1 and 3 in matchobject ''' return self.singular_noun(matchobject.group(1), matchobject.group(3)) def amo(self, matchobject): ''' A but take a matchobject use groups 1 and 3 in matchobject ''' if matchobject.group(3) is None: return self.a(matchobject.group(1)) return self.a(matchobject.group(1), matchobject.group(3)) def nomo(self, matchobject): ''' NO but take a matchobject use groups 1 and 3 in matchobject ''' return self.no(matchobject.group(1), matchobject.group(3)) def ordinalmo(self, matchobject): ''' ordinal but take a matchobject use group 1 ''' return self.ordinal(matchobject.group(1)) def numwordsmo(self, matchobject): ''' number_to_words but take a matchobject use group 1 ''' return self.number_to_words(matchobject.group(1)) def prespartmo(self, matchobject): ''' prespart but take a matchobject use group 1 ''' return self.present_participle(matchobject.group(1)) # 0. PERFORM GENERAL INFLECTIONS IN A STRING def inflect(self, text): ''' Perform inflections in a string. e.g. inflect('The plural of cat is plural(cat)') returns 'The plural of cat is cats' can use plural, plural_noun, plural_verb, plural_adj, singular_noun, a, an, no, ordinal, number_to_words and prespart ''' save_persistent_count = self.persistent_count sections = splitre(r"(num\([^)]*\))", text) inflection = [] for section in sections: (section, count) = subn(r"num\(\s*?(?:([^),]*)(?:,([^)]*))?)?\)", self.nummo, section) if not count: total = -1 while total: (section, total) = subn( r"(?x)\bplural \( ([^),]*) (, ([^)]*) )? \) ", self.plmo, section) (section, count) = subn( r"(?x)\bplural_noun \( ([^),]*) (, ([^)]*) )? \) ", self.plnounmo, section) total += count (section, count) = subn( r"(?x)\bplural_verb \( ([^),]*) (, ([^)]*) )? \) ", self.plverbmo, section) total += count (section, count) = subn( r"(?x)\bplural_adj \( ([^),]*) (, ([^)]*) )? \) ", self.pladjmo, section) total += count (section, count) = subn( r"(?x)\bsingular_noun \( ([^),]*) (, ([^)]*) )? 
\) ", self.sinounmo, section) total += count (section, count) = subn( r"(?x)\ban? \( ([^),]*) (, ([^)]*) )? \) ", self.amo, section) total += count (section, count) = subn( r"(?x)\bno \( ([^),]*) (, ([^)]*) )? \) ", self.nomo, section) total += count (section, count) = subn( r"(?x)\bordinal \( ([^)]*) \) ", self.ordinalmo, section) total += count (section, count) = subn( r"(?x)\bnumber_to_words \( ([^)]*) \) ", self.numwordsmo, section) total += count (section, count) = subn( r"(?x)\bpresent_participle \( ([^)]*) \) ", self.prespartmo, section) total += count inflection.append(section) self.persistent_count = save_persistent_count return "".join(inflection) # ## PLURAL SUBROUTINES def postprocess(self, orig, inflected): """ FIX PEDANTRY AND CAPITALIZATION :-) """ if '|' in inflected: inflected = inflected.split('|')[self.classical_dict['all']] if orig == "I": return inflected if orig == orig.upper(): return inflected.upper() if orig[0] == orig[0].upper(): return '%s%s' % (inflected[0].upper(), inflected[1:]) return inflected def partition_word(self, text): mo = search(r'\A(\s*)(.+?)(\s*)\Z', text) try: return mo.group(1), mo.group(2), mo.group(3) except AttributeError: # empty string return '', '', '' # def pl(self, *args, **kwds): # print 'pl() deprecated, use plural()' # raise DeprecationWarning # return self.plural(*args, **kwds) # # def plnoun(self, *args, **kwds): # print 'plnoun() deprecated, use plural_noun()' # raise DeprecationWarning # return self.plural_noun(*args, **kwds) # # def plverb(self, *args, **kwds): # print 'plverb() deprecated, use plural_verb()' # raise DeprecationWarning # return self.plural_verb(*args, **kwds) # # def pladj(self, *args, **kwds): # print 'pladj() deprecated, use plural_adj()' # raise DeprecationWarning # return self.plural_adj(*args, **kwds) # # def sinoun(self, *args, **kwds): # print 'sinoun() deprecated, use singular_noun()' # raise DeprecationWarning # return self.singular_noun(*args, **kwds) # # def prespart(self, *args, **kwds): # print 'prespart() deprecated, use present_participle()' # raise DeprecationWarning # return self.present_participle(*args, **kwds) # # def numwords(self, *args, **kwds): # print 'numwords() deprecated, use number_to_words()' # raise DeprecationWarning # return self.number_to_words(*args, **kwds) def plural(self, text, count=None): ''' Return the plural of text. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. ''' pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess( word, self._pl_special_adjective(word, count) or self._pl_special_verb(word, count) or self._plnoun(word, count)) return "%s%s%s" % (pre, plural, post) def plural_noun(self, text, count=None): ''' Return the plural of text, where text is a noun. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. ''' pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess(word, self._plnoun(word, count)) return "%s%s%s" % (pre, plural, post) def plural_verb(self, text, count=None): ''' Return the plural of text, where text is a verb. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. 
''' pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess(word, self._pl_special_verb(word, count) or self._pl_general_verb(word, count)) return "%s%s%s" % (pre, plural, post) def plural_adj(self, text, count=None): ''' Return the plural of text, where text is an adjective. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. ''' pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess(word, self._pl_special_adjective(word, count) or word) return "%s%s%s" % (pre, plural, post) def compare(self, word1, word2): ''' compare word1 and word2 for equality regardless of plurality return values: eq - the strings are equal p:s - word1 is the plural of word2 s:p - word2 is the plural of word1 p:p - word1 and word2 are two different plural forms of the one word False - otherwise ''' return ( self._plequal(word1, word2, self.plural_noun) or self._plequal(word1, word2, self.plural_verb) or self._plequal(word1, word2, self.plural_adj)) def compare_nouns(self, word1, word2): ''' compare word1 and word2 for equality regardless of plurality word1 and word2 are to be treated as nouns return values: eq - the strings are equal p:s - word1 is the plural of word2 s:p - word2 is the plural of word1 p:p - word1 and word2 are two different plural forms of the one word False - otherwise ''' return self._plequal(word1, word2, self.plural_noun) def compare_verbs(self, word1, word2): ''' compare word1 and word2 for equality regardless of plurality word1 and word2 are to be treated as verbs return values: eq - the strings are equal p:s - word1 is the plural of word2 s:p - word2 is the plural of word1 p:p - word1 and word2 are two different plural forms of the one word False - otherwise ''' return self._plequal(word1, word2, self.plural_verb) def compare_adjs(self, word1, word2): ''' compare word1 and word2 for equality regardless of plurality word1 and word2 are to be treated as adjectives return values: eq - the strings are equal p:s - word1 is the plural of word2 s:p - word2 is the plural of word1 p:p - word1 and word2 are two different plural forms of the one word False - otherwise ''' return self._plequal(word1, word2, self.plural_adj) def singular_noun(self, text, count=None, gender=None): ''' Return the singular of text, where text is a plural noun. If count supplied, then return the singular if count is one of: 1, a, an, one, each, every, this, that or if count is None otherwise return text unchanged. Whitespace at the start and end is preserved. 
''' pre, word, post = self.partition_word(text) if not word: return text sing = self._sinoun(word, count=count, gender=gender) if sing is not False: plural = self.postprocess(word, self._sinoun(word, count=count, gender=gender)) return "%s%s%s" % (pre, plural, post) return False def _plequal(self, word1, word2, pl): classval = self.classical_dict.copy() self.classical_dict = all_classical.copy() if word1 == word2: return "eq" if word1 == pl(word2): return "p:s" if pl(word1) == word2: return "s:p" self.classical_dict = no_classical.copy() if word1 == pl(word2): return "p:s" if pl(word1) == word2: return "s:p" self.classical_dict = classval.copy() if pl == self.plural or pl == self.plural_noun: if self._pl_check_plurals_N(word1, word2): return "p:p" if self._pl_check_plurals_N(word2, word1): return "p:p" if pl == self.plural or pl == self.plural_adj: if self._pl_check_plurals_adj(word1, word2): return "p:p" return False def _pl_reg_plurals(self, pair, stems, end1, end2): if search(r"(%s)(%s\|\1%s|%s\|\1%s)" % (stems, end1, end2, end2, end1), pair): return True return False def _pl_check_plurals_N(self, word1, word2): pair = "%s|%s" % (word1, word2) if pair in list(pl_sb_irregular_s.values()): return True if pair in list(pl_sb_irregular.values()): return True if pair in list(pl_sb_irregular_caps.values()): return True for (stems, end1, end2) in ( (pl_sb_C_a_ata, "as", "ata"), (pl_sb_C_is_ides, "is", "ides"), (pl_sb_C_a_ae, "s", "e"), (pl_sb_C_en_ina, "ens", "ina"), (pl_sb_C_um_a, "ums", "a"), (pl_sb_C_us_i, "uses", "i"), (pl_sb_C_on_a, "ons", "a"), (pl_sb_C_o_i_stems, "os", "i"), (pl_sb_C_ex_ices, "exes", "ices"), (pl_sb_C_ix_ices, "ixes", "ices"), (pl_sb_C_i, "s", "i"), (pl_sb_C_im, "s", "im"), ('.*eau', "s", "x"), ('.*ieu', "s", "x"), ('.*tri', "xes", "ces"), ('.{2,}[yia]n', "xes", "ges") ): if self._pl_reg_plurals(pair, stems, end1, end2): return True return False def _pl_check_plurals_adj(self, word1, word2): # VERSION: tuple in endswith requires python 2.5 word1a = word1[:word1.rfind("'")] if word1.endswith(("'s", "'")) else '' word2a = word2[:word2.rfind("'")] if word2.endswith(("'s", "'")) else '' # TODO: BUG? report upstream. I don't think you should chop off the s' # word1b = word1[:-2] if word1.endswith("s'") else '' # word2b = word2[:-2] if word2.endswith("s'") else '' # TODO: dresses', dresses's -> dresses, dresses when chop off letters # then they return False because they are the same. Need to fix this. 
if word1a: if word2a and (self._pl_check_plurals_N(word1a, word2a) or self._pl_check_plurals_N(word2a, word1a)): return True # if word2b and ( self._pl_check_plurals_N(word1a, word2b) # or self._pl_check_plurals_N(word2b, word1a) ): # return True # if word1b: # if word2a and ( self._pl_check_plurals_N(word1b, word2a) # or self._pl_check_plurals_N(word2a, word1b) ): # return True # if word2b and ( self._pl_check_plurals_N(word1b, word2b) # or self._pl_check_plurals_N(word2b, word1b) ): # return True return False def get_count(self, count=None): if count is None and self.persistent_count is not None: count = self.persistent_count if count is not None: count = 1 if ((str(count) in pl_count_one) or (self.classical_dict['zero'] and str(count).lower() in pl_count_zero)) else 2 else: count = '' return count # @profile def _plnoun(self, word, count=None): count = self.get_count(count) # DEFAULT TO PLURAL if count == 1: return word # HANDLE USER-DEFINED NOUNS value = self.ud_match(word, self.pl_sb_user_defined) if value is not None: return value # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS if word == '': return word lowerword = word.lower() if lowerword in pl_sb_uninflected_complete: return word if word in pl_sb_uninflected_caps: return word for k, v in pl_sb_uninflected_bysize.items(): if lowerword[-k:] in v: return word if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd): return word mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE) if mo and mo.group(2) != '': return "%s%s" % (self._plnoun(mo.group(1), 2), mo.group(2)) if ' a ' in lowerword or '-a-' in lowerword: mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE) if mo and mo.group(2) != '' and mo.group(3) != '': return "%s%s%s" % (self._plnoun(mo.group(1), 2), mo.group(2), self._plnoun(mo.group(3))) lowersplit = lowerword.split(' ') if len(lowersplit) >= 3: for numword in range(1, len(lowersplit) - 1): if lowersplit[numword] in pl_prep_list_da: return ' '.join( lowersplit[:numword - 1] + [self._plnoun(lowersplit[numword - 1], 2)] + lowersplit[numword:]) lowersplit = lowerword.split('-') if len(lowersplit) >= 3: for numword in range(1, len(lowersplit) - 1): if lowersplit[numword] in pl_prep_list_da: return ' '.join( lowersplit[:numword - 1] + [self._plnoun(lowersplit[numword - 1], 2) + '-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):]) # HANDLE PRONOUNS for k, v in pl_pron_acc_keys_bysize.items(): if lowerword[-k:] in v: # ends with accusivate pronoun for pk, pv in pl_prep_bysize.items(): if lowerword[:pk] in pv: # starts with a prep if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between return lowerword[:-k] + pl_pron_acc[lowerword[-k:]] try: return pl_pron_nom[word.lower()] except KeyError: pass try: return pl_pron_acc[word.lower()] except KeyError: pass # HANDLE ISOLATED IRREGULAR PLURALS wordsplit = word.split() wordlast = wordsplit[-1] lowerwordlast = wordlast.lower() if wordlast in list(pl_sb_irregular_caps.keys()): llen = len(wordlast) return '%s%s' % (word[:-llen], pl_sb_irregular_caps[wordlast]) if lowerwordlast in list(pl_sb_irregular.keys()): llen = len(lowerwordlast) return '%s%s' % (word[:-llen], pl_sb_irregular[lowerwordlast]) if (' '.join(wordsplit[-2:])).lower() in list(pl_sb_irregular_compound.keys()): llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words? 
return '%s%s' % (word[:-llen], pl_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()]) if lowerword[-3:] == 'quy': return word[:-1] + 'ies' if lowerword[-6:] == 'person': if self.classical_dict['persons']: return word + 's' else: return word[:-4] + 'ople' # HANDLE FAMILIES OF IRREGULAR PLURALS if lowerword[-3:] == 'man': for k, v in pl_sb_U_man_mans_bysize.items(): if lowerword[-k:] in v: return word + 's' for k, v in pl_sb_U_man_mans_caps_bysize.items(): if word[-k:] in v: return word + 's' return word[:-3] + 'men' if lowerword[-5:] == 'mouse': return word[:-5] + 'mice' if lowerword[-5:] == 'louse': return word[:-5] + 'lice' if lowerword[-5:] == 'goose': return word[:-5] + 'geese' if lowerword[-5:] == 'tooth': return word[:-5] + 'teeth' if lowerword[-4:] == 'foot': return word[:-4] + 'feet' if lowerword == 'die': return 'dice' # HANDLE UNASSIMILATED IMPORTS if lowerword[-4:] == 'ceps': return word if lowerword[-4:] == 'zoon': return word[:-2] + 'a' if lowerword[-3:] in ('cis', 'sis', 'xis'): return word[:-2] + 'es' for lastlet, d, numend, post in ( ('h', pl_sb_U_ch_chs_bysize, None, 's'), ('x', pl_sb_U_ex_ices_bysize, -2, 'ices'), ('x', pl_sb_U_ix_ices_bysize, -2, 'ices'), ('m', pl_sb_U_um_a_bysize, -2, 'a'), ('s', pl_sb_U_us_i_bysize, -2, 'i'), ('n', pl_sb_U_on_a_bysize, -2, 'a'), ('a', pl_sb_U_a_ae_bysize, None, 'e'), ): if lowerword[-1] == lastlet: # this test to add speed for k, v in d.items(): if lowerword[-k:] in v: return word[:numend] + post # HANDLE INCOMPLETELY ASSIMILATED IMPORTS if (self.classical_dict['ancient']): if lowerword[-4:] == 'trix': return word[:-1] + 'ces' if lowerword[-3:] in ('eau', 'ieu'): return word + 'x' if lowerword[-3:] in ('ynx', 'inx', 'anx') and len(word) > 4: return word[:-1] + 'ges' for lastlet, d, numend, post in ( ('n', pl_sb_C_en_ina_bysize, -2, 'ina'), ('x', pl_sb_C_ex_ices_bysize, -2, 'ices'), ('x', pl_sb_C_ix_ices_bysize, -2, 'ices'), ('m', pl_sb_C_um_a_bysize, -2, 'a'), ('s', pl_sb_C_us_i_bysize, -2, 'i'), ('s', pl_sb_C_us_us_bysize, None, ''), ('a', pl_sb_C_a_ae_bysize, None, 'e'), ('a', pl_sb_C_a_ata_bysize, None, 'ta'), ('s', pl_sb_C_is_ides_bysize, -1, 'des'), ('o', pl_sb_C_o_i_bysize, -1, 'i'), ('n', pl_sb_C_on_a_bysize, -2, 'a'), ): if lowerword[-1] == lastlet: # this test to add speed for k, v in d.items(): if lowerword[-k:] in v: return word[:numend] + post for d, numend, post in ( (pl_sb_C_i_bysize, None, 'i'), (pl_sb_C_im_bysize, None, 'im'), ): for k, v in d.items(): if lowerword[-k:] in v: return word[:numend] + post # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS if lowerword in pl_sb_singular_s_complete: return word + 'es' for k, v in pl_sb_singular_s_bysize.items(): if lowerword[-k:] in v: return word + 'es' if lowerword[-2:] == 'es' and word[0] == word[0].upper(): return word + 'es' # Wouldn't special words # ending with 's' always have been caught, regardless of them starting # with a capital letter (i.e. being names) # It makes sense below to do this for words ending in 'y' so that # Sally -> Sallys. But not sure it makes sense here. Where is the case # of a word ending in s that is caught here and would otherwise have been # caught below? 
# # removing it as I can't find a case that executes it # TODO: check this again # # if (self.classical_dict['names']): # mo = search(r"([A-Z].*s)$", word) # if mo: # return "%ses" % mo.group(1) if lowerword[-1] == 'z': for k, v in pl_sb_z_zes_bysize.items(): if lowerword[-k:] in v: return word + 'es' if lowerword[-2:-1] != 'z': return word + 'zes' if lowerword[-2:] == 'ze': for k, v in pl_sb_ze_zes_bysize.items(): if lowerword[-k:] in v: return word + 's' if lowerword[-2:] in ('ch', 'sh', 'zz', 'ss') or lowerword[-1] == 'x': return word + 'es' # ## (r"(.*)(us)$", "%s%ses"), TODO: why is this commented? # HANDLE ...f -> ...ves if lowerword[-3:] in ('elf', 'alf', 'olf'): return word[:-1] + 'ves' if lowerword[-3:] == 'eaf' and lowerword[-4:-3] != 'd': return word[:-1] + 'ves' if lowerword[-4:] in ('nife', 'life', 'wife'): return word[:-2] + 'ves' if lowerword[-3:] == 'arf': return word[:-1] + 'ves' # HANDLE ...y if lowerword[-1] == 'y': if lowerword[-2:-1] in 'aeiou' or len(word) == 1: return word + 's' if (self.classical_dict['names']): if lowerword[-1] == 'y' and word[0] == word[0].upper(): return word + 's' return word[:-1] + 'ies' # HANDLE ...o if lowerword in pl_sb_U_o_os_complete: return word + 's' for k, v in pl_sb_U_o_os_bysize.items(): if lowerword[-k:] in v: return word + 's' if lowerword[-2:] in ('ao', 'eo', 'io', 'oo', 'uo'): return word + 's' if lowerword[-1] == 'o': return word + 'es' # OTHERWISE JUST ADD ...s return "%ss" % word def _pl_special_verb(self, word, count=None): if (self.classical_dict['zero'] and str(count).lower() in pl_count_zero): return False count = self.get_count(count) if count == 1: return word # HANDLE USER-DEFINED VERBS value = self.ud_match(word, self.pl_v_user_defined) if value is not None: return value # HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND) lowerword = word.lower() try: firstword = lowerword.split()[0] except IndexError: return False # word is '' if firstword in list(plverb_irregular_pres.keys()): return "%s%s" % (plverb_irregular_pres[firstword], word[len(firstword):]) # HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES if firstword in plverb_irregular_non_pres: return word # HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND) if firstword.endswith("n't") and firstword[:-3] in list(plverb_irregular_pres.keys()): return "%sn't%s" % (plverb_irregular_pres[firstword[:-3]], word[len(firstword):]) if firstword.endswith("n't"): return word # HANDLE SPECIAL CASES mo = search(r"^(%s)$" % plverb_special_s, word) if mo: return False if search(r"\s", word): return False if lowerword == 'quizzes': return 'quiz' # HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS) if lowerword[-4:] in ('ches', 'shes', 'zzes', 'sses') or \ lowerword[-3:] == 'xes': return word[:-2] # # mo = search(r"^(.*)([cs]h|[x]|zz|ss)es$", # # word, IGNORECASE) # # if mo: # # return "%s%s" % (mo.group(1), mo.group(2)) if lowerword[-3:] == 'ies' and len(word) > 3: return lowerword[:-3] + 'y' if (lowerword in pl_v_oes_oe or lowerword[-4:] in pl_v_oes_oe_endings_size4 or lowerword[-5:] in pl_v_oes_oe_endings_size5): return word[:-1] if lowerword.endswith('oes') and len(word) > 3: return lowerword[:-2] mo = search(r"^(.*[^s])s$", word, IGNORECASE) if mo: return mo.group(1) # OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE) return False def _pl_general_verb(self, word, count=None): count = self.get_count(count) if count == 1: return word # HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND) mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_pres_keys, word, IGNORECASE) if mo: 
return "%s%s" % (plverb_ambiguous_pres[mo.group(1).lower()], mo.group(2)) # HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_non_pres, word, IGNORECASE) if mo: return word # OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED return word def _pl_special_adjective(self, word, count=None): count = self.get_count(count) if count == 1: return word # HANDLE USER-DEFINED ADJECTIVES value = self.ud_match(word, self.pl_adj_user_defined) if value is not None: return value # HANDLE KNOWN CASES mo = search(r"^(%s)$" % pl_adj_special_keys, word, IGNORECASE) if mo: return "%s" % (pl_adj_special[mo.group(1).lower()]) # HANDLE POSSESSIVES mo = search(r"^(%s)$" % pl_adj_poss_keys, word, IGNORECASE) if mo: return "%s" % (pl_adj_poss[mo.group(1).lower()]) mo = search(r"^(.*)'s?$", word) if mo: pl = self.plural_noun(mo.group(1)) trailing_s = "" if pl[-1] == 's' else "s" return "%s'%s" % (pl, trailing_s) # OTHERWISE, NO IDEA return False # @profile def _sinoun(self, word, count=None, gender=None): count = self.get_count(count) # DEFAULT TO PLURAL if count == 2: return word # SET THE GENDER try: if gender is None: gender = self.thegender elif gender not in singular_pronoun_genders: raise BadGenderError except (TypeError, IndexError): raise BadGenderError # HANDLE USER-DEFINED NOUNS value = self.ud_match(word, self.si_sb_user_defined) if value is not None: return value # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS if word == '': return word lowerword = word.lower() if word in si_sb_ois_oi_case: return word[:-1] if lowerword in pl_sb_uninflected_complete: return word if word in pl_sb_uninflected_caps: return word for k, v in pl_sb_uninflected_bysize.items(): if lowerword[-k:] in v: return word if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd): return word mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE) if mo and mo.group(2) != '': return "%s%s" % (self._sinoun(mo.group(1), 1, gender=gender), mo.group(2)) # how to reverse this one? 
# mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE) # if mo and mo.group(2) != '' and mo.group(3) != '': # return "%s%s%s" % (self._sinoun(mo.group(1), 1), # mo.group(2), # self._sinoun(mo.group(3), 1)) lowersplit = lowerword.split(' ') if len(lowersplit) >= 3: for numword in range(1, len(lowersplit) - 1): if lowersplit[numword] in pl_prep_list_da: return ' '.join(lowersplit[:numword - 1] + [self._sinoun(lowersplit[numword - 1], 1, gender=gender) or lowersplit[numword - 1]] + lowersplit[numword:]) lowersplit = lowerword.split('-') if len(lowersplit) >= 3: for numword in range(1, len(lowersplit) - 1): if lowersplit[numword] in pl_prep_list_da: return ' '.join( lowersplit[:numword - 1] + [(self._sinoun(lowersplit[numword - 1], 1, gender=gender) or lowersplit[numword - 1]) + '-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):]) # HANDLE PRONOUNS for k, v in si_pron_acc_keys_bysize.items(): if lowerword[-k:] in v: # ends with accusivate pronoun for pk, pv in pl_prep_bysize.items(): if lowerword[:pk] in pv: # starts with a prep if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between return lowerword[:-k] + get_si_pron('acc', lowerword[-k:], gender) try: return get_si_pron('nom', word.lower(), gender) except KeyError: pass try: return get_si_pron('acc', word.lower(), gender) except KeyError: pass # HANDLE ISOLATED IRREGULAR PLURALS wordsplit = word.split() wordlast = wordsplit[-1] lowerwordlast = wordlast.lower() if wordlast in list(si_sb_irregular_caps.keys()): llen = len(wordlast) return '%s%s' % (word[:-llen], si_sb_irregular_caps[wordlast]) if lowerwordlast in list(si_sb_irregular.keys()): llen = len(lowerwordlast) return '%s%s' % (word[:-llen], si_sb_irregular[lowerwordlast]) if (' '.join(wordsplit[-2:])).lower() in list(si_sb_irregular_compound.keys()): llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words? 
return '%s%s' % (word[:-llen], si_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()]) if lowerword[-5:] == 'quies': return word[:-3] + 'y' if lowerword[-7:] == 'persons': return word[:-1] if lowerword[-6:] == 'people': return word[:-4] + 'rson' # HANDLE FAMILIES OF IRREGULAR PLURALS if lowerword[-4:] == 'mans': for k, v in si_sb_U_man_mans_bysize.items(): if lowerword[-k:] in v: return word[:-1] for k, v in si_sb_U_man_mans_caps_bysize.items(): if word[-k:] in v: return word[:-1] if lowerword[-3:] == 'men': return word[:-3] + 'man' if lowerword[-4:] == 'mice': return word[:-4] + 'mouse' if lowerword[-4:] == 'lice': return word[:-4] + 'louse' if lowerword[-5:] == 'geese': return word[:-5] + 'goose' if lowerword[-5:] == 'teeth': return word[:-5] + 'tooth' if lowerword[-4:] == 'feet': return word[:-4] + 'foot' if lowerword == 'dice': return 'die' # HANDLE UNASSIMILATED IMPORTS if lowerword[-4:] == 'ceps': return word if lowerword[-3:] == 'zoa': return word[:-1] + 'on' for lastlet, d, numend, post in ( ('s', si_sb_U_ch_chs_bysize, -1, ''), ('s', si_sb_U_ex_ices_bysize, -4, 'ex'), ('s', si_sb_U_ix_ices_bysize, -4, 'ix'), ('a', si_sb_U_um_a_bysize, -1, 'um'), ('i', si_sb_U_us_i_bysize, -1, 'us'), ('a', si_sb_U_on_a_bysize, -1, 'on'), ('e', si_sb_U_a_ae_bysize, -1, ''), ): if lowerword[-1] == lastlet: # this test to add speed for k, v in d.items(): if lowerword[-k:] in v: return word[:numend] + post # HANDLE INCOMPLETELY ASSIMILATED IMPORTS if (self.classical_dict['ancient']): if lowerword[-6:] == 'trices': return word[:-3] + 'x' if lowerword[-4:] in ('eaux', 'ieux'): return word[:-1] if lowerword[-5:] in ('ynges', 'inges', 'anges') and len(word) > 6: return word[:-3] + 'x' for lastlet, d, numend, post in ( ('a', si_sb_C_en_ina_bysize, -3, 'en'), ('s', si_sb_C_ex_ices_bysize, -4, 'ex'), ('s', si_sb_C_ix_ices_bysize, -4, 'ix'), ('a', si_sb_C_um_a_bysize, -1, 'um'), ('i', si_sb_C_us_i_bysize, -1, 'us'), ('s', pl_sb_C_us_us_bysize, None, ''), ('e', si_sb_C_a_ae_bysize, -1, ''), ('a', si_sb_C_a_ata_bysize, -2, ''), ('s', si_sb_C_is_ides_bysize, -3, 's'), ('i', si_sb_C_o_i_bysize, -1, 'o'), ('a', si_sb_C_on_a_bysize, -1, 'on'), ('m', si_sb_C_im_bysize, -2, ''), ('i', si_sb_C_i_bysize, -1, ''), ): if lowerword[-1] == lastlet: # this test to add speed for k, v in d.items(): if lowerword[-k:] in v: return word[:numend] + post # HANDLE PLURLS ENDING IN uses -> use if (lowerword[-6:] == 'houses' or word in si_sb_uses_use_case or lowerword in si_sb_uses_use): return word[:-1] # HANDLE PLURLS ENDING IN ies -> ie if word in si_sb_ies_ie_case or lowerword in si_sb_ies_ie: return word[:-1] # HANDLE PLURLS ENDING IN oes -> oe if (lowerword[-5:] == 'shoes' or word in si_sb_oes_oe_case or lowerword in si_sb_oes_oe): return word[:-1] # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS if (word in si_sb_sses_sse_case or lowerword in si_sb_sses_sse): return word[:-1] if lowerword in si_sb_singular_s_complete: return word[:-2] for k, v in si_sb_singular_s_bysize.items(): if lowerword[-k:] in v: return word[:-2] if lowerword[-4:] == 'eses' and word[0] == word[0].upper(): return word[:-2] # Wouldn't special words # ending with 's' always have been caught, regardless of them starting # with a capital letter (i.e. being names) # It makes sense below to do this for words ending in 'y' so that # Sally -> Sallys. But not sure it makes sense here. Where is the case # of a word ending in s that is caught here and would otherwise have been # caught below? 
# # removing it as I can't find a case that executes it # TODO: check this again # # if (self.classical_dict['names']): # mo = search(r"([A-Z].*ses)$", word) # if mo: # return "%s" % mo.group(1) if lowerword in si_sb_z_zes: return word[:-2] if lowerword in si_sb_zzes_zz: return word[:-2] if lowerword[-4:] == 'zzes': return word[:-3] if (word in si_sb_ches_che_case or lowerword in si_sb_ches_che): return word[:-1] if lowerword[-4:] in ('ches', 'shes'): return word[:-2] if lowerword in si_sb_xes_xe: return word[:-1] if lowerword[-3:] == 'xes': return word[:-2] # (r"(.*)(us)es$", "%s%s"), TODO: why is this commented? # HANDLE ...f -> ...ves if (word in si_sb_ves_ve_case or lowerword in si_sb_ves_ve): return word[:-1] if lowerword[-3:] == 'ves': if lowerword[-5:-3] in ('el', 'al', 'ol'): return word[:-3] + 'f' if lowerword[-5:-3] == 'ea' and word[-6:-5] != 'd': return word[:-3] + 'f' if lowerword[-5:-3] in ('ni', 'li', 'wi'): return word[:-3] + 'fe' if lowerword[-5:-3] == 'ar': return word[:-3] + 'f' # HANDLE ...y if lowerword[-2:] == 'ys': if len(lowerword) > 2 and lowerword[-3] in 'aeiou': return word[:-1] if (self.classical_dict['names']): if lowerword[-2:] == 'ys' and word[0] == word[0].upper(): return word[:-1] if lowerword[-3:] == 'ies': return word[:-3] + 'y' # HANDLE ...o if lowerword[-2:] == 'os': if lowerword in si_sb_U_o_os_complete: return word[:-1] for k, v in si_sb_U_o_os_bysize.items(): if lowerword[-k:] in v: return word[:-1] if lowerword[-3:] in ('aos', 'eos', 'ios', 'oos', 'uos'): return word[:-1] if lowerword[-3:] == 'oes': return word[:-2] # UNASSIMILATED IMPORTS FINAL RULE if word in si_sb_es_is: return word[:-2] + 'is' # OTHERWISE JUST REMOVE ...s if lowerword[-1] == 's': return word[:-1] # COULD NOT FIND SINGULAR return False # ADJECTIVES def a(self, text, count=1): ''' Return the appropriate indefinite article followed by text. The indefinite article is either 'a' or 'an'. If count is not one, then return count followed by text instead of 'a' or 'an'. Whitespace at the start and end is preserved. 
''' mo = search(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z", text, IGNORECASE) if mo: word = mo.group(2) if not word: return text pre = mo.group(1) post = mo.group(3) result = self._indef_article(word, count) return "%s%s%s" % (pre, result, post) return '' an = a def _indef_article(self, word, count): mycount = self.get_count(count) if mycount != 1: return "%s %s" % (count, word) # HANDLE USER-DEFINED VARIANTS value = self.ud_match(word, self.A_a_user_defined) if value is not None: return "%s %s" % (value, word) # HANDLE ORDINAL FORMS for a in ( (r"^(%s)" % A_ordinal_a, "a"), (r"^(%s)" % A_ordinal_an, "an"), ): mo = search(a[0], word, IGNORECASE) if mo: return "%s %s" % (a[1], word) # HANDLE SPECIAL CASES for a in ( (r"^(%s)" % A_explicit_an, "an"), (r"^[aefhilmnorsx]$", "an"), (r"^[bcdgjkpqtuvwyz]$", "a"), ): mo = search(a[0], word, IGNORECASE) if mo: return "%s %s" % (a[1], word) # HANDLE ABBREVIATIONS for a in ( (r"(%s)" % A_abbrev, "an", VERBOSE), (r"^[aefhilmnorsx][.-]", "an", IGNORECASE), (r"^[a-z][.-]", "a", IGNORECASE), ): mo = search(a[0], word, a[2]) if mo: return "%s %s" % (a[1], word) # HANDLE CONSONANTS mo = search(r"^[^aeiouy]", word, IGNORECASE) if mo: return "a %s" % word # HANDLE SPECIAL VOWEL-FORMS for a in ( (r"^e[uw]", "a"), (r"^onc?e\b", "a"), (r"^onetime\b", "a"), (r"^uni([^nmd]|mo)", "a"), (r"^u[bcfghjkqrst][aeiou]", "a"), (r"^ukr", "a"), (r"^(%s)" % A_explicit_a, "a"), ): mo = search(a[0], word, IGNORECASE) if mo: return "%s %s" % (a[1], word) # HANDLE SPECIAL CAPITALS mo = search(r"^U[NK][AIEO]?", word) if mo: return "a %s" % word # HANDLE VOWELS mo = search(r"^[aeiou]", word, IGNORECASE) if mo: return "an %s" % word # HANDLE y... (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." SOUND) mo = search(r"^(%s)" % A_y_cons, word, IGNORECASE) if mo: return "an %s" % word # OTHERWISE, GUESS "a" return "a %s" % word # 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)" def no(self, text, count=None): ''' If count is 0, no, zero or nil, return 'no' followed by the plural of text. If count is one of: 1, a, an, one, each, every, this, that return count followed by text. Otherwise return count follow by the plural of text. In the return value count is always followed by a space. Whitespace at the start and end is preserved. ''' if count is None and self.persistent_count is not None: count = self.persistent_count if count is None: count = 0 mo = search(r"\A(\s*)(.+?)(\s*)\Z", text) pre = mo.group(1) word = mo.group(2) post = mo.group(3) if str(count).lower() in pl_count_zero: return "%sno %s%s" % (pre, self.plural(word, 0), post) else: return "%s%s %s%s" % (pre, count, self.plural(word, count), post) # PARTICIPLES def present_participle(self, word): ''' Return the present participle for word. word is the 3rd person singular verb. ''' plv = self.plural_verb(word, 2) for pat, repl in ( (r"ie$", r"y"), (r"ue$", r"u"), # TODO: isn't ue$ -> u encompassed in the following rule? (r"([auy])e$", r"\g<1>"), (r"ski$", r"ski"), (r"[^b]i$", r""), (r"^(are|were)$", r"be"), (r"^(had)$", r"hav"), (r"^(hoe)$", r"\g<1>"), (r"([^e])e$", r"\g<1>"), (r"er$", r"er"), (r"([^aeiou][aeiouy]([bdgmnprst]))$", "\g<1>\g<2>"), ): (ans, num) = subn(pat, repl, plv) if num: return "%sing" % ans return "%sing" % ans # NUMERICAL INFLECTIONS def ordinal(self, num): ''' Return the ordinal of num. num can be an integer or text e.g. ordinal(1) returns '1st' ordinal('one') returns 'first' ''' if match(r"\d", str(num)): try: num % 2 n = num except TypeError: if '.' 
in str(num): try: n = int(num[-1]) # numbers after decimal, so only need last one for ordinal except ValueError: # ends with '.', so need to use whole string n = int(num[:-1]) else: n = int(num) try: post = nth[n % 100] except KeyError: post = nth[n % 10] return "%s%s" % (num, post) else: mo = search(r"(%s)\Z" % ordinal_suff, num) try: post = ordinal[mo.group(1)] return resub(r"(%s)\Z" % ordinal_suff, post, num) except AttributeError: return "%sth" % num def millfn(self, ind=0): if ind > len(mill) - 1: print3("number out of range") raise NumOutOfRangeError return mill[ind] def unitfn(self, units, mindex=0): return "%s%s" % (unit[units], self.millfn(mindex)) def tenfn(self, tens, units, mindex=0): if tens != 1: return "%s%s%s%s" % (ten[tens], '-' if tens and units else '', unit[units], self.millfn(mindex)) return "%s%s" % (teen[units], mill[mindex]) def hundfn(self, hundreds, tens, units, mindex): if hundreds: return "%s hundred%s%s%s, " % (unit[hundreds], # use unit not unitfn as simpler " %s " % self.number_args['andword'] if tens or units else '', self.tenfn(tens, units), self.millfn(mindex)) if tens or units: return "%s%s, " % (self.tenfn(tens, units), self.millfn(mindex)) return '' def group1sub(self, mo): units = int(mo.group(1)) if units == 1: return " %s, " % self.number_args['one'] elif units: # TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl return "%s, " % unit[units] else: return " %s, " % self.number_args['zero'] def group1bsub(self, mo): units = int(mo.group(1)) if units: # TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl return "%s, " % unit[units] else: return " %s, " % self.number_args['zero'] def group2sub(self, mo): tens = int(mo.group(1)) units = int(mo.group(2)) if tens: return "%s, " % self.tenfn(tens, units) if units: return " %s %s, " % (self.number_args['zero'], unit[units]) return " %s %s, " % (self.number_args['zero'], self.number_args['zero']) def group3sub(self, mo): hundreds = int(mo.group(1)) tens = int(mo.group(2)) units = int(mo.group(3)) if hundreds == 1: hunword = " %s" % self.number_args['one'] elif hundreds: hunword = "%s" % unit[hundreds] # TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl else: hunword = " %s" % self.number_args['zero'] if tens: tenword = self.tenfn(tens, units) elif units: tenword = " %s %s" % (self.number_args['zero'], unit[units]) else: tenword = " %s %s" % (self.number_args['zero'], self.number_args['zero']) return "%s %s, " % (hunword, tenword) def hundsub(self, mo): ret = self.hundfn(int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count) self.mill_count += 1 return ret def tensub(self, mo): return "%s, " % self.tenfn(int(mo.group(1)), int(mo.group(2)), self.mill_count) def unitsub(self, mo): return "%s, " % self.unitfn(int(mo.group(1)), self.mill_count) def enword(self, num, group): # import pdb # pdb.set_trace() if group == 1: num = resub(r"(\d)", self.group1sub, num) elif group == 2: num = resub(r"(\d)(\d)", self.group2sub, num) num = resub(r"(\d)", self.group1bsub, num, 1) # group1bsub same as # group1sub except it doesn't use the default word for one. # Is this required? i.e. is the default word not to beused when # grouping in pairs? # # No. This is a bug. Fixed. TODO: report upstream. 
elif group == 3: num = resub(r"(\d)(\d)(\d)", self.group3sub, num) num = resub(r"(\d)(\d)", self.group2sub, num, 1) num = resub(r"(\d)", self.group1sub, num, 1) elif int(num) == 0: num = self.number_args['zero'] elif int(num) == 1: num = self.number_args['one'] else: num = num.lstrip().lstrip('0') self.mill_count = 0 # surely there's a better way to do the next bit mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num) while mo: num = resub(r"(\d)(\d)(\d)(?=\D*\Z)", self.hundsub, num, 1) mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num) num = resub(r"(\d)(\d)(?=\D*\Z)", self.tensub, num, 1) num = resub(r"(\d)(?=\D*\Z)", self.unitsub, num, 1) return num def blankfn(self, mo): ''' do a global blank replace TODO: surely this can be done with an option to resub rather than this fn ''' return '' def commafn(self, mo): ''' do a global ',' replace TODO: surely this can be done with an option to resub rather than this fn ''' return ',' def spacefn(self, mo): ''' do a global ' ' replace TODO: surely this can be done with an option to resub rather than this fn ''' return ' ' def number_to_words(self, num, wantlist=False, group=0, comma=',', andword='and', zero='zero', one='one', decimal='point', threshold=None): ''' Return a number in words. group = 1, 2 or 3 to group numbers before turning into words comma: define comma andword: word for 'and'. Can be set to ''. e.g. "one hundred and one" vs "one hundred one" zero: word for '0' one: word for '1' decimal: word for decimal point threshold: numbers above threshold not turned into words parameters not remembered from last call. Departure from Perl version. ''' self.number_args = dict(andword=andword, zero=zero, one=one) num = '%s' % num # Handle "stylistic" conversions (up to a given threshold)... if (threshold is not None and float(num) > threshold): spnum = num.split('.', 1) while (comma): (spnum[0], n) = subn(r"(\d)(\d{3}(?:,|\Z))", r"\1,\2", spnum[0]) if n == 0: break try: return "%s.%s" % (spnum[0], spnum[1]) except IndexError: return "%s" % spnum[0] if group < 0 or group > 3: raise BadChunkingOptionError nowhite = num.lstrip() if nowhite[0] == '+': sign = "plus" elif nowhite[0] == '-': sign = "minus" else: sign = "" myord = (num[-2:] in ('st', 'nd', 'rd', 'th')) if myord: num = num[:-2] finalpoint = False if decimal: if group != 0: chunks = num.split('.') else: chunks = num.split('.', 1) if chunks[-1] == '': # remove blank string if nothing after decimal chunks = chunks[:-1] finalpoint = True # add 'point' to end of output else: chunks = [num] first = 1 loopstart = 0 if chunks[0] == '': first = 0 if len(chunks) > 1: loopstart = 1 for i in range(loopstart, len(chunks)): chunk = chunks[i] # remove all non numeric \D chunk = resub(r"\D", self.blankfn, chunk) if chunk == "": chunk = "0" if group == 0 and (first == 0 or first == ''): chunk = self.enword(chunk, 1) else: chunk = self.enword(chunk, group) if chunk[-2:] == ', ': chunk = chunk[:-2] chunk = resub(r"\s+,", self.commafn, chunk) if group == 0 and first: chunk = resub(r", (\S+)\s+\Z", " %s \\1" % andword, chunk) chunk = resub(r"\s+", self.spacefn, chunk) # chunk = resub(r"(\A\s|\s\Z)", self.blankfn, chunk) chunk = chunk.strip() if first: first = '' chunks[i] = chunk numchunks = [] if first != 0: numchunks = chunks[0].split("%s " % comma) if myord and numchunks: # TODO: can this be just one re as it is in perl? 
mo = search(r"(%s)\Z" % ordinal_suff, numchunks[-1]) if mo: numchunks[-1] = resub(r"(%s)\Z" % ordinal_suff, ordinal[mo.group(1)], numchunks[-1]) else: numchunks[-1] += 'th' for chunk in chunks[1:]: numchunks.append(decimal) numchunks.extend(chunk.split("%s " % comma)) if finalpoint: numchunks.append(decimal) # wantlist: Perl list context. can explictly specify in Python if wantlist: if sign: numchunks = [sign] + numchunks return numchunks elif group: signout = "%s " % sign if sign else '' return "%s%s" % (signout, ", ".join(numchunks)) else: signout = "%s " % sign if sign else '' num = "%s%s" % (signout, numchunks.pop(0)) if decimal is None: first = True else: first = not num.endswith(decimal) for nc in numchunks: if nc == decimal: num += " %s" % nc first = 0 elif first: num += "%s %s" % (comma, nc) else: num += " %s" % nc return num # Join words with commas and a trailing 'and' (when appropriate)... def join(self, words, sep=None, sep_spaced=True, final_sep=None, conj='and', conj_spaced=True): ''' Join words into a list. e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly' options: conj: replacement for 'and' sep: separator. default ',', unless ',' is in the list then ';' final_sep: final separator. default ',', unless ',' is in the list then ';' conj_spaced: boolean. Should conj have spaces around it ''' if not words: return "" if len(words) == 1: return words[0] if conj_spaced: if conj == '': conj = ' ' else: conj = ' %s ' % conj if len(words) == 2: return "%s%s%s" % (words[0], conj, words[1]) if sep is None: if ',' in ''.join(words): sep = ';' else: sep = ',' if final_sep is None: final_sep = sep final_sep = "%s%s" % (final_sep, conj) if sep_spaced: sep += ' ' return "%s%s%s" % (sep.join(words[0:-1]), final_sep, words[-1])
94,476
30.273419
106
py
Reflect
Reflect-master/util/models.py
from tf2_models.capnet import Capsule from tf2_models.cnn import VanillaCNN from tf2_models.ff import VanillaFF from tf2_models.ff_resnet import FFResnet from tf2_models.lm_lstm import LmLSTM, LmLSTMSharedEmb, ClassifierLSTM, LmLSTMSharedEmbV2 from tf2_models.lm_transformer import LmGPT2, LmGPT2SharedWeights, ClassifierGPT2, ClassifierGPT2SharedWeights, \ ClassifierBERT, ClassifierBERTSharedWeights from tf2_models.matrix_caps import MatrixCaps from tf2_models.resnet import Resnet MODELS = {"lm_lstm": LmLSTM, "lm_gpt2": LmGPT2, "lm_gpt2_shared": LmGPT2SharedWeights, "lm_lstm_shared_emb": LmLSTMSharedEmbV2, 'cl_gpt2': ClassifierGPT2, 'cl_lstm': ClassifierLSTM, 'cl_gpt2_shared': ClassifierGPT2SharedWeights, 'cl_bert': ClassifierBERT, 'cl_bert_shared': ClassifierBERTSharedWeights, 'cl_vcnn': VanillaCNN, 'cl_vff': VanillaFF, 'cl_capsule': Capsule, 'matrix_capsule': MatrixCaps, 'resnet': Resnet, 'resnet_ff': FFResnet}
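# Usage sketch (added for clarity; it mirrors how this registry is consumed in
# distill/distill_main.py elsewhere in this repo). `task`, `get_model_params`,
# and `cl_token` are created by the caller, not by this module:
#
#   model_cls = MODELS['cl_lstm']
#   model = model_cls(hparams=get_model_params(task, 'cl_lstm', 'base'),
#                     cl_token=cl_token)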
1,068
41.76
113
py
Reflect
Reflect-master/util/tasks.py
from tasks.lm1b import Lm1B from tasks.mnist import Mnist, AffNistTask, Svhn, Mnist40 from tasks.smallnorb import SmallNorb from tasks.sst import ClassifySST2, LmSST2 from tasks.sv_agreement import SvAgreementLM, WordSvAgreementLM, WordSvAgreementVP from tasks.wiki import WikiLM TASKS = { 'sv_agreement_lm': SvAgreementLM, 'word_sv_agreement_lm': WordSvAgreementLM, 'word_sv_agreement_vp': WordSvAgreementVP, 'mnist': Mnist, 'affnist': AffNistTask, 'smallnorb': SmallNorb, 'sst2': ClassifySST2, 'lm_sst2': LmSST2, 'lm1b': Lm1B, 'wikilm': WikiLM, 'svhn': Svhn, 'mnist40': Mnist40 }
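# Usage sketch (added for clarity; it mirrors distill/distill_main.py in this
# repo). `get_task_params` comes from util.config_util in the calling code:
#
#   task = TASKS['word_sv_agreement_vp'](get_task_params(batch_size=64))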
606
27.904762
82
py
Reflect
Reflect-master/distill/offline_repshare.py
import tensorflow as tf import os from distill.distiller import Distiller from distill.online_distiller import OnlineDistiller from distill.repsim_util import get_reps from tf2_models.train_utils import ExponentialDecayWithWarmpUp from tf2_models.trainer import OPTIMIZER_DIC from tf2_models.utils import camel2snake from inspect import isfunction import numpy as np class OfflineRepDistiller(Distiller): """ Implementation of soft representation sharing in online mode """ def __init__(self, hparams, distill_params, teacher_model, student_model, teacher_task, student_task, teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir): self.teacher_model = teacher_model self.student_model = student_model self.student_task = student_task self.teacher_task = teacher_task self.hparams = hparams self.distill_params = distill_params self.temperature = tf.convert_to_tensor(distill_params.distill_temp) self.rep_loss = self.student_task.get_rep_loss() self.student_task_loss = self.student_task.get_loss_fn() self.teacher_task_loss = self.teacher_task.get_loss_fn() self.student_metrics = self.student_task.metrics() self.teacher_metrics = self.teacher_task.metrics() self.teacher_task_probs_fn = self.teacher_task.get_probs_fn() self.create_student_optimizer() self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir) self.setup_models(distill_params) def setup_models(self, distill_params): x_s, y_s = iter(self.student_task.valid_dataset).next() x_t, y_t = iter(self.teacher_task.valid_dataset).next() self.student_model(x_s) self.student_model.summary() self.teacher_model(x_t) self.teacher_model.summary() self.student_model.compile( optimizer=self.student_optimizer, loss=self.student_task_loss, metrics=[self.student_metrics]) self.teacher_model.compile( loss=self.teacher_task_loss, metrics=[self.teacher_metrics]) def distill_loop(self): @tf.function(experimental_relax_shapes=True) def student_train_step(x, y_s, teacher_logits, teacher_reps): ''' Training step for the student model (this is the only training step for offline distillation). 
:param x: input :param y: output of the teacher model, used to compute distill loss :param y_true: actual outputs, used to compute actual loss :return: distill_loss actual_loss ''' #teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y_t, temperature=self.temperature) with tf.GradientTape() as tape: #logits = self.student_model(x, training=True) logits, student_reps = get_reps(x, self.student_model, index=(0, self.student_model.rep_index), layer= (None, self.student_model.rep_layer), training=True) rep_loss = self.rep_loss(reps1=student_reps, reps2=teacher_reps, padding_symbol=self.student_task.output_padding_symbol) reg_loss = tf.math.add_n(self.student_model.losses) actual_loss = self.student_task_loss(y_pred=logits, y_true=y_s) final_loss = self.distill_params.student_distill_rep_rate * rep_loss + \ self.distill_params.student_gold_rate * actual_loss + reg_loss grads = tape.gradient(final_loss, self.student_model.trainable_weights) self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights), name="student_optimizer") return rep_loss, actual_loss @tf.function def epoch_loop(): step = 0 student_train_examples = self.student_task.train_dataset for x_s, y_s in student_train_examples: teacher_logits, teacher_reps = get_reps(x_s, self.teacher_model, index=(0, self.teacher_model.rep_index), layer=(None, self.teacher_model.rep_layer), training=False) reg_loss = tf.math.add_n(self.teacher_model.losses) actual_loss = self.teacher_task_loss(y_pred=teacher_logits, y_true=y_s) teacher_loss = actual_loss + reg_loss distill_loss, actual_loss = student_train_step(x=x_s, y_s=y_s, teacher_logits=teacher_logits, teacher_reps=teacher_reps) # Log every 200 batches. if step % 200 == 0: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('student_learning_rate', self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations)) tf.summary.scalar('fine_distill_loss', distill_loss, ) with tf.summary.experimental.summary_scope("teacher_train"): tf.summary.scalar('teacher_loss', teacher_loss) step += 1 if step == self.student_task.n_train_batches: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('distill_loss', distill_loss) tf.summary.scalar('actual_loss', actual_loss) break with self.summary_writer.as_default(): num_epochs = self.distill_params.n_epochs for _ in tf.range(num_epochs): epoch_loop() teacher_eval_results = self.teacher_model.evaluate(self.teacher_task.valid_dataset, steps=self.teacher_task.n_valid_batches) # Evaluate Teacher with tf.summary.experimental.summary_scope("eval_teacher"): for i, m_name in enumerate(self.teacher_model.metrics_names): tf.summary.scalar(m_name, teacher_eval_results[i]) # Evaluate Student student_eval_results = self.student_model.evaluate(self.student_task.valid_dataset, steps=self.student_task.n_valid_batches) with tf.summary.experimental.summary_scope("eval_student"): for i, m_name in enumerate(self.student_model.metrics_names): tf.summary.scalar(m_name, student_eval_results[i]) self.save_student()
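# Construction sketch (added for clarity; not part of the original file). The
# offline representation-sharing distiller takes separate teacher and student
# tasks, unlike the single-task distillers in this package; all names below
# are placeholders for objects built by the caller:
#
#   distiller = OfflineRepDistiller(hparams=hparams,
#                                   distill_params=get_distill_params(distill_config),
#                                   teacher_model=teacher_model,
#                                   student_model=student_model,
#                                   teacher_task=teacher_task,
#                                   student_task=student_task,
#                                   teacher_log_dir=teacher_log_dir,
#                                   student_log_dir=student_log_dir,
#                                   teacher_ckpt_dir=teacher_ckpt_dir,
#                                   student_ckpt_dir=student_ckpt_dir)
#   distiller.restore_teacher()   # checkpoint-restore helpers come from the Distiller base class
#   distiller.distill_loop()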
6,270
42.248276
128
py
Reflect
Reflect-master/distill/repsim_util.py
import tensorflow as tf import numpy as np def get_reps(outputs, index=1, layer=-1, **kwargs): """ If Model is LSTM: 1: final_rnn_outputs, 2: hidden_activation (for all layers, including input embeddings) reduction: None, "last", "sum" """ logits = outputs[0] outputs = tf.tuple(outputs) rep = outputs[index] if layer != -1 : rep = tf.gather(rep, layer) return logits, rep @tf.function def normalized_pairwisedot_product_sim(reps1, reps2): reps1 = reps1 / tf.norm(reps1, axis=-1)[..., None] reps2 = reps2 / tf.norm(reps2, axis=-1)[..., None] pw_dot_product = tf.cast(tf.matmul(reps1, reps2, transpose_b=True), dtype=tf.float32) return pw_dot_product @tf.function def normalized_dot_product_sim(reps1, reps2, padding_mask): # normalize reps: reps1 = reps1 / tf.norm(reps1, axis=-1)[..., None] reps2 = reps2 / tf.norm(reps2, axis=-1)[..., None] # Elementwise multiplication dot_product = tf.multiply(reps1, reps2) # Sum over last axis to get the dot product similarity between corresponding pairs dot_product = tf.reduce_sum(dot_product, axis=-1) dot_product = tf.multiply(dot_product, padding_mask[:, 0]) return dot_product @tf.function def second_order_rep_sim(reps1, reps2, padding_mask): sims1 = normalized_pairwisedot_product_sim(reps1, reps1) sims2 = normalized_pairwisedot_product_sim(reps2, reps2) #padding_mask = tf.ones((tf.shape(reps1)[0], 1)) so_sims = normalized_dot_product_sim(sims1, sims2, padding_mask) * padding_mask[:, 0] mean_sim = tf.reduce_sum(so_sims) / tf.reduce_sum(padding_mask) return mean_sim, so_sims @tf.function def compare_models(inputs, model1, model2, index1=1, index2=1, layer1=None, layer2=None, padding_symbol=None): reps1 = get_reps(inputs, model1, index=index1, layer=layer1) reps2 = get_reps(inputs, model2, index=index2, layer=layer2) reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1])) reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1])) if padding_symbol is not None and padding_symbol > -1: padding_mask = tf.cast(1.0 - (inputs == padding_symbol), dtype=tf.float32) padding_mask = tf.reshape(padding_mask, (-1, 1)) else: padding_mask = tf.ones((tf.shape(reps1)[0])) similarity_measures = second_order_rep_sim(reps1, reps2, padding_mask=padding_mask) return similarity_measures @tf.function def compare_reps(reps1, reps2, padding_symbol=None, inputs=None): reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1])) reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1])) if padding_symbol is not None and padding_symbol > -1: padding_mask = tf.cast(1.0 - (inputs == padding_symbol), dtype=tf.float32) padding_mask = tf.reshape(padding_mask, (-1, 1)) else: padding_mask = tf.ones((tf.shape(reps1)[0], 1)) similarity_measures = second_order_rep_sim(reps1, reps2, padding_mask) return similarity_measures @tf.function(experimental_relax_shapes=True) def rep_loss(reps1, reps2, padding_symbol=None, inputs=None): reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1])) reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1])) if padding_symbol is not None and padding_symbol > -1: padding_mask = 1.0 - tf.cast(inputs == padding_symbol, dtype=tf.float32) padding_mask = tf.reshape(padding_mask, (-1, 1)) else: padding_mask = tf.ones((tf.shape(reps1)[0], 1)) mean_sim, _ = second_order_rep_sim(reps1, reps2, padding_mask) return 1.0 - mean_sim
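# Minimal sanity-check sketch (added for clarity; not part of the original
# file). The shapes below are arbitrary; identical representations give a
# rep_loss of ~0 because their second-order similarity structure matches.
if __name__ == '__main__':
  _reps_a = tf.random.normal((8, 16))
  _reps_b = tf.random.normal((8, 16))
  print("rep_loss(a, a):", rep_loss(_reps_a, _reps_a).numpy())
  print("rep_loss(a, b):", rep_loss(_reps_a, _reps_b).numpy())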
3,444
31.5
110
py
Reflect
Reflect-master/distill/online_distiller.py
import tensorflow as tf import os from distill.distill_util import get_distill_scheduler from distill.distiller import Distiller from tf2_models.train_utils import ExponentialDecayWithWarmpUp from tf2_models.trainer import OPTIMIZER_DIC from tf2_models.utils import camel2snake from inspect import isfunction import numpy as np class OnlineDistiller(Distiller): def __init__(self, hparams, distill_params, teacher_model, student_model, task, teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir): self.hparams = hparams self.teacher_model = teacher_model self.student_model = student_model self.task = task self.distill_params = distill_params self.temperature = tf.convert_to_tensor(distill_params.distill_temp) self.distill_loss = self.task.get_distill_loss_fn(self.distill_params) self.task_loss = self.task.get_loss_fn() self.student_metrics = self.task.metrics() self.teacher_metrics = self.task.metrics() self.task_probs_fn = self.task.get_probs_fn() self.create_student_optimizer() self.create_teacher_optimizer() self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir) self.setup_models(distill_params, task) self.distillrate_scheduler = get_distill_scheduler(distill_params.distill_schedule, min=distill_params.distill_min_rate, max=distill_params.student_distill_rate) def setup_ckp_and_summary(self, student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir): # Init checkpoints self.teacher_ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=self.teacher_optimizer, net=self.teacher_model) self.teacher_manager = tf.train.CheckpointManager(self.teacher_ckpt, teacher_ckpt_dir, max_to_keep=self.hparams.max_checkpoints) self.student_ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=self.student_optimizer, net=self.student_model) self.student_manager = tf.train.CheckpointManager(self.student_ckpt, student_ckpt_dir, max_to_keep=self.hparams.max_checkpoints) # Init summary student_summary_dir = os.path.join(student_log_dir, 'summaries') tf.io.gfile.makedirs(student_log_dir) self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(student_summary_dir, 'train')) tf.compat.v2.summary.experimental.set_step(self.teacher_optimizer.iterations) def create_teacher_optimizer(self): teacher_initial_learning_rate = self.distill_params.teacher_learning_rate lr_schedule = ExponentialDecayWithWarmpUp( initial_learning_rate=teacher_initial_learning_rate, decay_steps=self.distill_params.teacher_decay_steps, decay_rate=self.distill_params.teacher_decay_rate, warmup_steps=self.distill_params.teacher_warmup_steps, hold_base_rate_steps=self.distill_params.teacher_hold_base_rate_steps) self.teacher_optimizer = OPTIMIZER_DIC[self.distill_params.teacher_optimizer]( learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0) def setup_models(self, distill_params, task): x, y = iter(self.task.valid_dataset).next() self.student_model(x, padding_symbol=self.task.input_padding_symbol) self.student_model.summary() self.teacher_model(x, padding_symbol=self.task.input_padding_symbol) self.teacher_model.summary() self.student_model.compile( optimizer=self.student_optimizer, loss=self.task_loss, metrics=[self.student_metrics]) self.teacher_model.compile( optimizer=self.teacher_optimizer, loss=self.task_loss, metrics=[self.teacher_metrics]) def distill_loop(self): @tf.function(experimental_relax_shapes=True) def teacher_train_step(x, y_true): with tf.GradientTape() as tape: logits = self.teacher_model(x, 
padding_symbol=self.task.input_padding_symbol, training=True) loss = self.teacher_model.loss(y_pred=logits, y_true=y_true) if len(self.teacher_model.losses) > 0: reg_loss = tf.math.add_n(self.teacher_model.losses) else: reg_loss = 0 final_loss = loss + reg_loss grads = tape.gradient(final_loss, self.teacher_model.trainable_weights) self.teacher_model.optimizer.apply_gradients(zip(grads, self.teacher_model.trainable_weights), name="teacher_optimizer") return logits, final_loss @tf.function(experimental_relax_shapes=True) def student_train_step(x, y, y_true): ''' Training step for the student model (this is the only training step for offline distillation). :param x: input :param y: output of the teacher model, used to compute distill loss :param y_true: actual outputs, used to compute actual loss :return: distill_loss actual_loss ''' student_distill_rate = self.distillrate_scheduler(self.student_optimizer.iterations) student_gold_rate = 1 - student_distill_rate with tf.GradientTape() as tape: logits = self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True) distill_loss = self.distill_loss(y_pred=logits, y_true=y) if len(self.student_model.losses) > 0: reg_loss = tf.math.add_n(self.student_model.losses) else: reg_loss = 0 actual_loss = self.task_loss(y_pred=logits, y_true=y_true) final_loss = student_distill_rate * distill_loss + \ student_gold_rate * actual_loss + reg_loss grads = tape.gradient(final_loss, self.student_model.trainable_weights) self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights), name="student_optimizer") return distill_loss, actual_loss, student_distill_rate @tf.function def epoch_loop(): step = 0 for x, y in self.task.train_dataset: teacher_logits, teacher_loss = teacher_train_step(x, y) teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y, temperature=self.temperature) soft_targets = tf.stop_gradient(teacher_probs) distill_loss, actual_loss, student_distill_rate = student_train_step(x=x, y=soft_targets, y_true=y) # Log every 200 batches. 
if step % 200 == 0: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('student_learning_rate', self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations)) tf.summary.scalar('fine_distill_loss', distill_loss) tf.summary.scalar('student_distill_rate', student_distill_rate) with tf.summary.experimental.summary_scope("teacher_train"): tf.summary.scalar('teacher_loss', teacher_loss) tf.summary.scalar('teacher_learning_rate', self.teacher_model.optimizer.learning_rate(self.teacher_model.optimizer.iterations)) step += 1 if step == self.task.n_train_batches: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('distill_loss', distill_loss) tf.summary.scalar('actual_loss', actual_loss) break with self.summary_writer.as_default(): num_epochs = self.distill_params.n_epochs for epoch in tf.range(num_epochs): epoch_loop() teacher_eval_results = self.teacher_model.evaluate(self.task.valid_dataset, steps=self.task.n_valid_batches) # Evaluate Teacher with tf.summary.experimental.summary_scope("eval_teacher"): for i, m_name in enumerate(self.teacher_model.metrics_names): tf.summary.scalar(m_name, teacher_eval_results[i]) # Evaluate Student student_eval_results = self.student_model.evaluate(self.task.valid_dataset, steps=self.task.n_valid_batches) with tf.summary.experimental.summary_scope("eval_student"): for i, m_name in enumerate(self.student_model.metrics_names): tf.summary.scalar(m_name, student_eval_results[i]) pow2 = [0,1,2,4,8,16,32,64,128,256,512] if self.hparams.keep_some_checkpoints: if (epoch in pow2) or (epoch == (self.distill_params.n_epochs - 1)): self.save_student() self.save_teacher() else: self.save_student() self.save_teacher() def save_teacher(self): self.teacher_ckpt.step.assign_add(1) save_path = self.teacher_manager.save() tf.print("Saved teacher checkpoint", save_path)
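# Usage sketch (added for clarity; this mirrors distill/distill_main.py in the
# same package, where the class is selected via DISTILLER['online']). All
# constructor arguments are objects built by the caller:
#
#   distiller = OnlineDistiller(hparams=hparams,
#                               distill_params=get_distill_params(distill_config),
#                               teacher_model=teacher_model,
#                               student_model=student_model,
#                               task=task,
#                               teacher_ckpt_dir=teacher_ckpt_dir,
#                               teacher_log_dir=teacher_log_dir,
#                               student_ckpt_dir=student_ckpt_dir,
#                               student_log_dir=student_log_dir)
#   distiller.restore_teacher()
#   distiller.restore_student()
#   distiller.distill_loop()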
9,001
43.127451
132
py
Reflect
Reflect-master/distill/model.py
class Model(object): def apply(self, examples): raise NotImplementedError def update(self, loss): raise NotImplementedError
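# Interface sketch (added for clarity; not part of the original file). This
# base class only fixes a two-method contract; a concrete subclass would look
# roughly like the hypothetical example below (`forward` and `optimizer` are
# placeholders, not attributes defined anywhere in this repo):
#
#   class MyModel(Model):
#     def apply(self, examples):
#       return [self.forward(x) for x in examples]  # produce model outputs
#
#     def update(self, loss):
#       self.optimizer.minimize(loss)               # take one optimization step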
136
21.833333
29
py
Reflect
Reflect-master/distill/distill_main.py
''' Code to apply the distillation process for a teacher and a student model. Run: python distill/distill_main.py \ --task=word_sv_agreement_vp \ --teacher_exp_name=small_lstm_v4_0.0001_withl2 \ --teacher_model=cl_lstm \ --teacher_config=small_lstm_v4 \ --student_exp_name=distilled0 \ --student_model=cl_gpt2 \ --student_config=small_gpt_v9 \ --distill_mode=offline ''' from distill.distiller import Distiller from distill.online_distiller import OnlineDistiller from util import constants from util.config_util import get_distill_params import os from util.config_util import get_model_params, get_task_params, get_train_params from absl import flags, logging import sys import tensorflow as tf from util.models import MODELS from util.tasks import TASKS FLAGS = flags.FLAGS flags.DEFINE_string('logdir', 'logs', 'log dir') flags.DEFINE_string('chkpt_dir', 'tf_ckpts', 'checkpoint dir') flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm | word_sv_agreement_vp') flags.DEFINE_string('distill_config', 'base', ' distillation hparams set') flags.DEFINE_string('teacher_exp_name', 'trial4', 'experiment directory') flags.DEFINE_string('teacher_model', 'lm_lstm', 'lm_lstm | lm_gpt2') flags.DEFINE_string('student_exp_name', 'trial1', 'experiment directory') flags.DEFINE_string('student_model', 'lm_lstm', 'lm_lstm | lm_gpt2') flags.DEFINE_string('student_config', 'base', 'base | small_lstm ') flags.DEFINE_string('teacher_config', 'base', 'base | small_lstm ') flags.DEFINE_string('distill_mode', 'offline', 'offline | online | off_schdld | on_schdld') flags.DEFINE_integer('max_checkpoints', 2, 'keep_checkpoint_every_n_hours passed to training manager') flags.DEFINE_boolean('keep_some_checkpoints', False, 'keep_checkpoint_every_n_hours passed to training manager') flags.DEFINE_string('keep_checkpoint_every_n_hours',None, 'keep_checkpoint_every_n_hours passed to training manager') flags.DEFINE_integer('batch_size', 64, 'batch_size') FLAGS(sys.argv) hparams = flags.FLAGS def create_and_load_models(): if hasattr(task.databuilder, 'sentence_encoder'): cl_token = task.databuilder.sentence_encoder().encode(constants.bos) else: cl_token = 0 teacher_model = MODELS[hparams.teacher_model]( hparams=get_model_params(task, hparams.teacher_model, hparams.teacher_config), cl_token=cl_token) student_model = MODELS[hparams.student_model]( hparams=get_model_params(task, hparams.student_model, hparams.student_config), cl_token=cl_token) teacher_log_dir = os.path.join(hparams.logdir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher",teacher_model.model_name,hparams.teacher_config,hparams.teacher_exp_name])) teacher_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name, '_'.join([teacher_model.model_name, hparams.teacher_config,hparams.teacher_exp_name])) student_log_dir = os.path.join(hparams.logdir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name, "student", student_model.model_name, str(hparams.student_config), hparams.student_exp_name])) student_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name, "student",student_model.model_name, str(hparams.student_config),hparams.student_exp_name])) return teacher_model, student_model, teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir DISTILLER = 
{'offline': Distiller, 'online': OnlineDistiller, } if __name__ == '__main__': # Create task task = TASKS[hparams.task](get_task_params(batch_size=hparams.batch_size)) # Create the Model teacher_model, student_model, \ teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir = create_and_load_models() distiller = DISTILLER[hparams.distill_mode](hparams=hparams, distill_params=get_distill_params(hparams.distill_config), teacher_model=teacher_model, student_model=student_model, task=task, teacher_ckpt_dir=teacher_ckpt_dir, teacher_log_dir=teacher_log_dir, student_ckpt_dir=student_ckpt_dir, student_log_dir=student_log_dir, ) # Restore Models distiller.restore_teacher() distiller.restore_student() # Run the distillation loop distiller.distill_loop()
5,174
47.820755
136
py
Reflect
Reflect-master/distill/distill_mnist.py
''' Code to apply the distillation process for a teacher and a student model. Run: python distill/distill_main.py \ --task=word_sv_agreement_vp \ --teacher_exp_name=small_lstm_v4_0.0001_withl2 \ --teacher_model=cl_lstm \ --teacher_config=small_lstm_v4 \ --student_exp_name=distilled0 \ --student_model=cl_gpt2 \ --student_config=small_gpt_v9 \ --distill_mode=offline ''' from distill.distiller import Distiller from distill.online_distiller import OnlineDistiller from distill.scheduled_distiller import ScheduledDistiller from util import constants from util.config_util import get_distill_params import os from util.config_util import get_model_params, get_task_params, get_train_params from absl import flags, logging import sys import tensorflow as tf from util.models import MODELS from util.tasks import TASKS FLAGS = flags.FLAGS flags.DEFINE_string('logdir', 'logs', 'log dir') flags.DEFINE_string('chkpt_dir', 'tf_ckpts', 'checkpoint dir') flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm | word_sv_agreement_vp') flags.DEFINE_string('distill_config', 'base', ' distillation hparams set') flags.DEFINE_string('teacher_exp_name', 'trial4', 'experiment directory') flags.DEFINE_string('teacher_model', 'lm_lstm', 'lm_lstm | lm_gpt2') flags.DEFINE_string('student_exp_name', 'trial1', 'experiment directory') flags.DEFINE_string('student_model', 'lm_lstm', 'lm_lstm | lm_gpt2') flags.DEFINE_string('student_config', 'base', 'base | small_lstm ') flags.DEFINE_string('teacher_config', 'base', 'base | small_lstm ') flags.DEFINE_string('distill_mode', 'offline', 'offline | online | off_schdld | on_schdld') flags.DEFINE_string('keep_checkpoint_every_n_hours',None, 'keep_checkpoint_every_n_hours passed to training manager') FLAGS(sys.argv) hparams = flags.FLAGS def create_and_load_models(): teacher_model = MODELS[hparams.teacher_model]( hparams=get_model_params(task, hparams.teacher_model, hparams.teacher_config)) student_model = MODELS[hparams.student_model]( hparams=get_model_params(task, hparams.student_model, hparams.student_config)) teacher_log_dir = os.path.join(hparams.logdir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher",teacher_model.model_name,hparams.teacher_config,hparams.teacher_exp_name])) teacher_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name, '_'.join([teacher_model.model_name, hparams.teacher_config,hparams.teacher_exp_name])) student_log_dir = os.path.join(hparams.logdir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name, "student", student_model.model_name, str(hparams.student_config), hparams.student_exp_name])) student_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name, '_'.join([hparams.distill_mode,hparams.distill_config, "teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name, "student",student_model.model_name, str(hparams.student_config),hparams.student_exp_name])) return teacher_model, student_model, teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir DISTILLER = {'offline': Distiller, 'online': OnlineDistiller, 'off_schdld': ScheduledDistiller} if __name__ == '__main__': # Create task task = TASKS[hparams.task](get_task_params()) # Create the Model teacher_model, student_model, \ teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir = create_and_load_models() distiller = DISTILLER[hparams.distill_mode](hparams=hparams, 
distill_params=get_distill_params(hparams.distill_config), teacher_model=teacher_model, student_model=student_model, task=task, teacher_ckpt_dir=teacher_ckpt_dir, teacher_log_dir=teacher_log_dir, student_ckpt_dir=student_ckpt_dir, student_log_dir=student_log_dir, ) # Restore Models distiller.restore_teacher() distiller.restore_student() # Run the distillation loop distiller.distill_loop()
4,778
47.272727
136
py
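A small illustration of how the script above composes a checkpoint directory from its flags; this is not part of the dataset record, and the concrete model name below is a hypothetical placeholder in the format the models' model_name attribute produces:

import os

# Hypothetical stand-ins for task.name, teacher_model.model_name and the flag values
# (the config and experiment names are taken from the docstring's example run).
chkpt_dir = 'tf_ckpts'
task_name = 'word_sv_agreement_vp'
teacher_model_name = 'cl_lstm_em-128_h-256_d-2_hdrop-0.25_indrop-0.2'  # assumed example
teacher_config = 'small_lstm_v4'
teacher_exp_name = 'small_lstm_v4_0.0001_withl2'

teacher_ckpt_dir = os.path.join(
    chkpt_dir, task_name,
    '_'.join([teacher_model_name, teacher_config, teacher_exp_name]))
print(teacher_ckpt_dir)
# tf_ckpts/word_sv_agreement_vp/cl_lstm_em-128_h-256_d-2_hdrop-0.25_indrop-0.2_small_lstm_v4_small_lstm_v4_0.0001_withl2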
Reflect
Reflect-master/distill/distill_util.py
import tensorflow as tf

from tf2_models.metrics import distill_loss, sequence_distill_loss


@tf.function(experimental_relax_shapes=True)
def get_topk_mask(inputs, k):
  inputs_shape = tf.shape(inputs)
  inputs_shape = tf.cast(inputs_shape, dtype=tf.int64)
  values, indices = tf.nn.top_k(inputs, k=k, sorted=False)
  indices = tf.cast(indices, dtype=tf.int64)
  k = tf.cast(k, dtype=tf.int64)
  temp_indices = tf.meshgrid(*[tf.range(d, dtype=tf.int64) for d in (tf.unstack(
    inputs_shape[:(inputs.get_shape().ndims - 1)]) + [k])], indexing='ij')
  temp_indices = tf.stack(temp_indices[:-1] + [indices], axis=-1)
  full_indices = tf.reshape(temp_indices, [-1, inputs.get_shape().ndims])
  values = tf.reshape(values, [-1])

  mask_vals = tf.ones_like(values, dtype=tf.int64)
  full_indices = tf.cast(full_indices, dtype=tf.int64)
  mask_st = tf.SparseTensor(indices=full_indices,
                            values=mask_vals,
                            dense_shape=inputs_shape)
  mask = tf.sparse.to_dense(tf.sparse.reorder(mask_st))

  return mask


@tf.function(experimental_relax_shapes=True)
def get_topk_masked_probs(logits, labels, temperature, k=100, padding_symbol=0):
  topk_mask = (1 - tf.cast(get_topk_mask(logits, k), dtype=tf.float32)) * -10e8
  teacher_probs = tf.nn.softmax((logits + topk_mask) / temperature, axis=-1)
  sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
  masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
      1 - sequence_mask[..., None])

  return masked_teacher_probs


@tf.function(experimental_relax_shapes=True)
def get_masked_probs(logits, labels, temperature, padding_symbol=0):
  teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)
  sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
  masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
      1 - sequence_mask[..., None])

  return masked_teacher_probs


@tf.function(experimental_relax_shapes=True)
def get_probs(logits, labels, temperature):
  teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)

  return teacher_probs


class DistillLoss(tf.keras.losses.Loss):

  def __init__(self, padding_symbol=0, tmp=1.0, **kwargs):
    super(DistillLoss, self).__init__(**kwargs)
    self.tmp = tf.Variable(tmp, dtype=tf.float32, name="temp")
    self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")

  def call(self, y_true, y_pred):
    return distill_loss(y_true, y_pred, self.tmp)


class SequenceDistillLoss(tf.keras.losses.Loss):

  def __init__(self, padding_symbol=0, tmp=1.0, **kwargs):
    super(SequenceDistillLoss, self).__init__(**kwargs)
    self.tmp = tf.Variable(tmp, dtype=tf.float32, name="tmp")
    self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")

  def call(self, y_true, y_pred):
    return sequence_distill_loss(y_true, y_pred, self.padding_symbol, self.tmp)


def get_distill_scheduler(schedule, min=0.0, max=1.0, decay_steps=10000):
  if schedule == "exp":
    scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
      max,
      decay_steps=1000,
      decay_rate=0.96,
      staircase=True)
  elif schedule == 'crs':
    scheduler = tf.keras.experimental.CosineDecayRestarts(
      max,
      decay_steps,
      t_mul=2.0,
      m_mul=0.9,
      alpha=0.001,
    )
  elif schedule == 'lnr':
    a = (max - min) / decay_steps
    scheduler = lambda x: max - a * x
  elif schedule == 'stp':
    scheduler = lambda x: max if x < decay_steps else min
  else:
    scheduler = lambda x: max

  return scheduler
3,653
35.54
110
py
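A minimal usage sketch for the helpers in distill_util.py above; it is not part of the original file, assumes the Reflect repo root is on PYTHONPATH, and uses toy shapes:

import tensorflow as tf

from distill.distill_util import get_masked_probs, get_distill_scheduler

# Toy teacher outputs: [batch, seq_len, vocab]; label id 0 is the padding symbol.
logits = tf.random.normal((2, 5, 8))
labels = tf.constant([[3, 4, 1, 0, 0],
                      [2, 2, 5, 6, 0]], dtype=tf.int64)

# Soft targets at temperature 2.0; padded positions are replaced by a
# one-hot on index 0, so they carry no distillation signal.
soft_targets = get_masked_probs(logits, labels, tf.constant(2.0))

# Distillation-rate schedule: 'lnr' decays linearly from max to min over decay_steps.
distill_rate_fn = get_distill_scheduler('lnr', min=0.0, max=1.0, decay_steps=100)
print(float(distill_rate_fn(10)))   # approximately 0.9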
Reflect
Reflect-master/distill/distiller.py
import tensorflow as tf import os from distill.distill_util import get_distill_scheduler from tf2_models.train_utils import ExponentialDecayWithWarmpUp from tf2_models.trainer import OPTIMIZER_DIC import numpy as np class Distiller(object): ''' Pipeline for offline distillation. ''' def __init__(self, hparams, distill_params, teacher_model, student_model, task, teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir): self.teacher_model = teacher_model self.student_model = student_model self.task = task self.distill_params = distill_params self.temperature = tf.convert_to_tensor(distill_params.distill_temp) self.distill_loss = self.task.get_distill_loss_fn(self.distill_params) self.task_loss = self.task.get_loss_fn() self.metrics = self.task.metrics() self.task_probs_fn = self.task.get_probs_fn() self.hparams = hparams self.create_student_optimizer() self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir) self.setup_models(distill_params, task) self.distillrate_scheduler = get_distill_scheduler(distill_params.distill_schedule, min=distill_params.distill_min_rate, max=distill_params.student_distill_rate) def create_student_optimizer(self): student_initial_learning_rate = self.distill_params.student_learning_rate if 'crs' in self.distill_params.schedule: lr_schedule = ( tf.keras.experimental.CosineDecayRestarts( student_initial_learning_rate, first_decay_steps=self.distill_params.student_decay_steps, t_mul=5.0, #0.2 m_mul=self.distill_params.student_decay_rate, alpha=0.001, )) else: lr_schedule = ExponentialDecayWithWarmpUp( initial_learning_rate=student_initial_learning_rate, decay_steps=self.distill_params.student_decay_steps, decay_rate=self.distill_params.student_decay_rate, warmup_steps=self.distill_params.student_warmup_steps, hold_base_rate_steps=self.distill_params.student_hold_base_rate_steps) self.student_optimizer = OPTIMIZER_DIC[self.distill_params.student_optimizer]( learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0) def setup_ckp_and_summary(self, student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir): # Init checkpoints self.teacher_ckpt = tf.train.Checkpoint(net=self.teacher_model) self.teacher_manager = tf.train.CheckpointManager(self.teacher_ckpt, teacher_ckpt_dir, max_to_keep=self.hparams.max_checkpoints) self.student_ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=self.student_optimizer, net=self.student_model) self.student_manager = tf.train.CheckpointManager(self.student_ckpt, student_ckpt_dir, keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours, max_to_keep=self.hparams.max_checkpoints) # Init summary student_summary_dir = os.path.join(student_log_dir, 'summaries') tf.io.gfile.makedirs(student_log_dir) self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(student_summary_dir, 'train')) tf.compat.v2.summary.experimental.set_step(self.student_optimizer.iterations) def setup_models(self, distill_params, task): x, y = iter(self.task.valid_dataset).next() self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True) self.student_model.summary() self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True) self.teacher_model.summary() self.student_model.compile( optimizer=self.student_optimizer, loss=self.task_loss, metrics=[self.metrics]) self.teacher_model.compile( loss=self.task_loss, metrics=[self.metrics]) def restore_teacher(self): ''' Restore the teacher model from its checkpoint. 
''' self.teacher_ckpt.restore(self.teacher_manager.latest_checkpoint) if self.teacher_manager.latest_checkpoint: print("Restored teacher from {}".format(self.teacher_manager.latest_checkpoint)) else: print("Initializing teacher from scratch.") def restore_student(self): ''' Restore the student model from its checkpoint. ''' self.student_ckpt.restore(self.student_manager.latest_checkpoint) if self.student_manager.latest_checkpoint: print("Restored student from {}".format(self.student_manager.latest_checkpoint)) else: print("Initializing student from scratch.") def save_student(self): self.student_ckpt.step.assign_add(1) save_path = self.student_manager.save() tf.print("Saved student checkpoint", save_path) def distill_loop(self): ''' Offline Distillation main loop. ''' # logging.info('Distribute strategy: mirrored.') # strategy = tf.distribute.MirroredStrategy() # train_dataset = strategy.experimental_distribute_dataset(self.task.train_dataset) # valid_dataset = strategy.experimental_distribute_dataset(self.task.valid_dataset) @tf.function(experimental_relax_shapes=True) def student_train_step(x, teacher_y, y_true): ''' Training step for the student model (this is the only training step for offline distillation). :param x: input :param y: output of the teacher model, used to compute distill loss :param y_true: actual outputs, used to compute actual loss :return: distill_loss actual_loss ''' student_distill_rate = self.distillrate_scheduler(self.student_optimizer.iterations) student_gold_rate = 1 - student_distill_rate with tf.GradientTape() as tape: logits = self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True) distill_loss = self.distill_loss(y_pred=logits, y_true=teacher_y) reg_loss = tf.math.add_n(self.student_model.losses) actual_loss = self.task_loss(y_pred=logits, y_true=y_true) final_loss = student_distill_rate * distill_loss + \ student_gold_rate * actual_loss + reg_loss grads = tape.gradient(final_loss, self.student_model.trainable_weights) self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights), name="student_optimizer") return distill_loss, actual_loss, student_distill_rate @tf.function def epoch_loop(): step = 0 for x,y in self.task.train_dataset: teacher_logits = self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True) teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y, temperature=self.temperature) distill_loss, actual_loss, student_distill_rate = student_train_step(x=x, teacher_y=teacher_probs, y_true=y) # Log every 200 batches. 
if step % 200 == 0: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('student_learning_rate', self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations), ) tf.summary.scalar('fine_distill_loss', distill_loss) tf.summary.scalar('student_distill_rate', student_distill_rate) step += 1 # Stop at the end of the epoch if (step % self.task.n_train_batches) == 0: with tf.summary.experimental.summary_scope("student_train"): tf.summary.scalar('distill_loss', distill_loss) tf.summary.scalar('actual_loss', actual_loss) break @tf.function def summarize(teacher_eval_results, student_eval_results): with tf.summary.experimental.summary_scope("eval_teacher"): for i, m_name in enumerate(self.teacher_model.metrics_names): tf.summary.scalar(m_name, teacher_eval_results[i]) with tf.summary.experimental.summary_scope("eval_student"): for i, m_name in enumerate(self.student_model.metrics_names): tf.summary.scalar(m_name, student_eval_results[i]) with self.summary_writer.as_default(): for epoch in np.arange(self.distill_params.n_epochs): epoch_loop() # Evaluate Teacher teacher_eval_results = self.teacher_model.evaluate(self.task.valid_dataset, steps=self.task.n_valid_batches) # Evaluate Student student_eval_results = self.student_model.evaluate(self.task.valid_dataset, steps=self.task.n_valid_batches) summarize(teacher_eval_results, student_eval_results) pow2 = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512] if self.hparams.keep_some_checkpoints: if (epoch in pow2) or (epoch == (self.distill_params.n_epochs - 1)): self.save_student() else: self.save_student()
9,284
44.292683
132
py
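A toy, self-contained illustration (not from the original file) of the objective mixed inside student_train_step above: the scheduled distillation rate weights the soft-target loss, its complement weights the gold-label loss, and the student's regularization losses are added on top:

import tensorflow as tf

student_distill_rate = tf.constant(0.75)   # output of the distill-rate scheduler
student_gold_rate = 1.0 - student_distill_rate

distill_loss = tf.constant(2.0)    # loss against the teacher's soft targets
actual_loss = tf.constant(1.0)     # loss against the gold labels
reg_loss = tf.constant(0.01)       # sum of the student model's regularization losses

final_loss = student_distill_rate * distill_loss + \
             student_gold_rate * actual_loss + reg_loss
# 0.75 * 2.0 + 0.25 * 1.0 + 0.01 = 1.76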
Reflect
Reflect-master/tf2_models/embedding.py
import tensorflow as tf from tf2_models.common_layers import get_initializer, shape_list class SharedEmbeddings(tf.keras.layers.Layer): """Construct shared token embeddings. """ def __init__(self, vocab_size, hidden_size, initializer_range=None, regularizer=None, **kwargs): super(SharedEmbeddings, self).__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range self.regularizer = regularizer def build(self, input_shape): """Build shared word embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), regularizer=self.regularizer) super(SharedEmbeddings, self).build(input_shape) def call(self, inputs, mode="embedding"): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size])
2,633
38.313433
137
py
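A minimal usage sketch for SharedEmbeddings above; it is not part of the original file, the import assumes the Reflect repo root is on PYTHONPATH, and it shows the two modes reusing one weight matrix:

import tensorflow as tf

from tf2_models.embedding import SharedEmbeddings

emb = SharedEmbeddings(vocab_size=10, hidden_size=4)

token_ids = tf.constant([[1, 2, 3]])
vectors = emb(token_ids, mode="embedding")   # (1, 3, 4): row lookup in the shared matrix
logits = emb(vectors, mode="linear")         # (1, 3, 10): projection back with the same (tied) weights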
Reflect
Reflect-master/tf2_models/lm_transformer.py
import tensorflow as tf from tf2_models.common_layers import get_initializer, shape_list from tf2_models.embedding import SharedEmbeddings from tf2_models.transformer_layers import Block from tf2_models.transformers import * class LmGPT2(tf.keras.Model): def __init__(self, hparams, scope='lm_gpt2', *inputs, **kwargs): del kwargs['cl_token'] super(LmGPT2, self).__init__(hparams, *inputs, **kwargs) self.scope = scope self.rep_index = 1 self.rep_layer = None self.model_name = '_'.join([self.scope, 'h-'+str(hparams.embedding_dim), 'd-'+str(hparams.depth), 'rdrop-'+str(hparams.resid_pdrop), 'adrop-' + str(hparams.attn_pdrop), 'indrop-'+str(hparams.embd_pdrop)]) self.create_vars(hparams) @tf.function def create_vars(self, hparams): self.transformer = GPT2(hparams, name='transformer') def call(self, inputs, padding_symbol=None, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] lm_logits = self.transformer.wte(hidden_states, mode="linear") #outputs = (lm_logits,) + transformer_outputs[1:] return lm_logits # lm_logits, presents, (all hidden_states), (attentions) def detailed_call(self, inputs, padding_symbol=None, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] lm_logits = self.transformer.wte(hidden_states, mode="linear") outputs = (lm_logits,) + transformer_outputs return outputs # lm_logits, presents, (all hidden_states), (attentions) class LmGPT2SharedWeights(LmGPT2): def __init__(self, hparams, scope='lm_gpt2_shared_weights', *inputs, **kwargs): super(LmGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs) @tf.function def create_vars(self, hparams): self.transformer = GPT2SharedWeights(hparams, name='shared_transformer') def call(self, inputs, padding_symbol=None, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] lm_logits = self.transformer.wte(hidden_states, mode="linear") #outputs = (lm_logits,) + transformer_outputs[1:] return lm_logits # lm_logits, presents, (all hidden_states), (attentions) class ClassifierGPT2(tf.keras.Model): def __init__(self, hparams, scope='cl_gpt2',*inputs, **kwargs): self.cl_token = kwargs['cl_token'] del kwargs['cl_token'] super(ClassifierGPT2, self).__init__(hparams, *inputs, **kwargs) self.rep_index = 2 self.rep_layer = None self.scope = scope self.hparams = hparams self.model_name = '_'.join([self.scope, 'h-'+str(hparams.embedding_dim), 'd-'+str(hparams.depth), 'rdrop-'+str(hparams.resid_pdrop), 'adrop-' + str(hparams.attn_pdrop), 'indrop-'+str(hparams.embd_pdrop)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0001) self.create_vars(**kwargs) #@tf.function def create_vars(self,**kwargs): self.transformer = GPT2(self.hparams, name='transformer', **kwargs) self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels, kernel_initializer=get_initializer(self.hparams.initializer_range), name='e2c') def call(self, inputs, padding_symbol=None, **kwargs): @tf.function(experimental_relax_shapes=True) def _call(batch_size, inputs, transformer_outputs): mask = tf.cast(inputs != 0, dtype=tf.int32) inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1 batch_indices = tf.range(batch_size) indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1) hidden_states = tf.gather_nd(transformer_outputs[0], indices) cl_logits = self.e2c(hidden_states) return cl_logits # Add CL token: batch_size = tf.shape(inputs)[0] #cl_token = 
tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1)) #cl_tokens = tf.tile(cl_token, (batch_size, 1)) #inputs = tf.concat([cl_tokens, inputs], axis=-1) transformer_outputs = self.transformer(inputs, **kwargs) cl_logits = _call(batch_size, inputs, transformer_outputs) return cl_logits def detailed_call(self, inputs, padding_symbol=None, **kwargs): @tf.function(experimental_relax_shapes=True) def _call(batch_size, inputs, transformer_outputs): mask = tf.cast(inputs != 0, dtype=tf.int32) inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1 batch_indices = tf.range(batch_size) indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1) hidden_states = tf.gather_nd(transformer_outputs[0], indices) cl_logits = self.e2c(hidden_states) return cl_logits, hidden_states # Add CL token: batch_size = tf.shape(inputs)[0] #cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1)) #cl_tokens = tf.tile(cl_token, (batch_size, 1)) #inputs = tf.concat([cl_tokens, inputs], axis=-1) transformer_outputs = self.transformer(inputs, **kwargs) cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs) outputs = (cl_logits, hidden_states) + transformer_outputs return outputs class ClassifierGPT2SharedWeights(ClassifierGPT2): def __init__(self, hparams, scope='cl_gpt2_shared_weights', *inputs, **kwargs): super(ClassifierGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs) @tf.function def create_vars(self): self.transformer = GPT2SharedWeights(self.hparams, name='shared_transformer') self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels, kernel_initializer=get_initializer(self.hparams.initializer_range), name='e2c') class ClassifierBERT(tf.keras.Model): def __init__(self, hparams, scope='cl_bert',*inputs, **kwargs): self.cl_token = kwargs['cl_token'] del kwargs['cl_token'] super(ClassifierBERT, self).__init__(hparams, *inputs, **kwargs) self.scope = scope self.hparams = hparams self.rep_index = 2 self.rep_layer = None self.model_name = '_'.join([self.scope, 'h-'+str(hparams.embedding_dim), 'd-'+str(hparams.depth), 'rdrop-'+str(hparams.resid_pdrop), 'adrop-' + str(hparams.attn_pdrop), 'indrop-'+str(hparams.embd_pdrop)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0001) self.create_vars(**kwargs) #@tf.function def create_vars(self,**kwargs): self.transformer = Bert(self.hparams, name='transformer', **kwargs) self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels, kernel_initializer=get_initializer(self.hparams.initializer_range), name='e2c') def call(self, inputs, padding_symbol=None, add_cls=True, **kwargs): @tf.function(experimental_relax_shapes=True) def _call(batch_size, inputs, transformer_outputs): #mask = tf.cast(inputs != 0, dtype=tf.int32) #inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1 #batch_indices = tf.range(batch_size) #indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1) hidden_states = transformer_outputs[0][:,0]#tf.gather_nd(transformer_outputs[0], indices) cl_logits = self.e2c(hidden_states, **kwargs) return cl_logits # Add CL token: batch_size = tf.shape(inputs)[0] if add_cls: cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1)) cl_tokens = tf.tile(cl_token, (batch_size, 1)) inputs = tf.concat([cl_tokens, inputs], axis=-1) transformer_outputs = self.transformer(inputs, **kwargs) cl_logits = _call(batch_size, inputs, transformer_outputs) return cl_logits def detailed_call(self, inputs, 
padding_symbol=None, add_cls=True, **kwargs): @tf.function(experimental_relax_shapes=True) def _call(batch_size, inputs, transformer_outputs): hidden_states = transformer_outputs[0][:, 0] cl_logits = self.e2c(hidden_states) return cl_logits, hidden_states # Add CL token: batch_size = tf.shape(inputs)[0] if add_cls: cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1)) cl_tokens = tf.tile(cl_token, (batch_size, 1)) inputs = tf.concat([cl_tokens, inputs], axis=-1) transformer_outputs = self.transformer(inputs, **kwargs) cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs) reps_start_index = 1 if add_cls else 0 outputs = (cl_logits, hidden_states, transformer_outputs[0][:,reps_start_index:,:]) + transformer_outputs return outputs def get_input_embeddings(self, inputs, add_cls=True, **kwargs): # Add CL token: batch_size = tf.shape(inputs)[0] if add_cls: cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1, 1)) cl_tokens = tf.tile(cl_token, (batch_size, 1)) inputs = tf.concat([cl_tokens, inputs], axis=-1) outputs = self.transformer.get_input_embeddings(inputs, **kwargs) return outputs def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past , **kwargs): transformer_outputs = self.transformer.call_with_embeddings(input_embeddings=input_embeddings, input_shape=input_shape, padding_mask=padding_mask, past=past, **kwargs) hidden_states = transformer_outputs[0][:, 0] cl_logits = self.e2c(hidden_states) return cl_logits, hidden_states class ClassifierBERTSharedWeights(ClassifierBERT): def __init__(self, hparams, scope='cl_bert_shared', *inputs, **kwargs): super(ClassifierBERTSharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs) # @tf.function def create_vars(self, **kwargs): self.transformer = BertSharedWeights(self.hparams, name='transformer', **kwargs) self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels, kernel_initializer=get_initializer(self.hparams.initializer_range), name='e2c')
10,814
39.965909
109
py
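A toy sketch (not from the original file) of the gather_nd indexing that ClassifierGPT2 above uses to pull the hidden state of the last non-padding token for classification:

import tensorflow as tf

inputs = tf.constant([[5, 3, 7, 0, 0],
                      [2, 9, 4, 8, 1]], dtype=tf.int64)   # 0 = padding
hidden_states = tf.random.normal((2, 5, 16))              # [batch, seq, hidden]

mask = tf.cast(inputs != 0, dtype=tf.int32)
inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1          # index of last real token: [2, 4]
batch_indices = tf.range(tf.shape(inputs)[0])
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
last_token_states = tf.gather_nd(hidden_states, indices)   # (2, 16)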
Reflect
Reflect-master/tf2_models/ff.py
import tensorflow as tf import numpy as np class VanillaFF(tf.keras.models.Sequential): def __init__(self, hparams, scope="cl_vff", *inputs, **kwargs): if 'cl_token' in kwargs: del kwargs['cl_token'] super(VanillaFF, self).__init__() self.scope = scope self.hparams = hparams self.model_name = '_'.join([self.scope, 'h-' + '.'.join([str(x) for x in self.hparams.hidden_dim]), 'd-' + str(self.hparams.depth), 'hdrop-' + str(self.hparams.hidden_dropout_rate), 'indrop-' + str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00001, l2=0.00001) self.create_vars() self.rep_index = 1 self.rep_layer = -1 def create_vars(self): self.flat = tf.keras.layers.Flatten() # self.batch_norm = tf.keras.layers.BatchNormalization() # self.batch_norm.trainable = True self.indrop = tf.keras.layers.Dropout(self.hparams.input_dropout_rate) self.activation = tf.keras.layers.Activation('relu') self.hidden_layers = [] self.hidden_batch_norms = [] self.hidden_dropouts = [] for i in np.arange(self.hparams.depth): self.hidden_layers.append(tf.keras.layers.Dense(self.hparams.hidden_dim[i], activation=None, #'relu', kernel_regularizer=self.regularizer)) self.hidden_batch_norms.append(tf.keras.layers.BatchNormalization()) self.hidden_batch_norms[i].trainable = True self.hidden_dropouts.append(tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)) self.final_dense = tf.keras.layers.Dense(self.hparams.output_dim, kernel_regularizer=self.regularizer) def call(self, inputs, padding_symbol=None, training=None, **kwargs): x = self.flat(inputs, **kwargs) # x = self.batch_norm(x, training=training, **kwargs) x = self.indrop(x, training=training, **kwargs) for i in np.arange(self.hparams.depth): x = self.hidden_layers[i](x, training=training, **kwargs) x = self.activation(x) x = self.hidden_batch_norms[i](x, training=training, **kwargs) x = self.hidden_dropouts[i](x, training=training, **kwargs) logits = self.final_dense(x, training=training, **kwargs) return logits def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs): layer_activations = [] x = self.flat(inputs, **kwargs) x = self.indrop(x, training=None, **kwargs) layer_activations.append(x) for i in np.arange(self.hparams.depth): x = self.hidden_layers[i](x, training=training, **kwargs) x = self.activation(x) x = self.hidden_batch_norms[i](x, training=training, **kwargs) x = self.hidden_dropouts[i](x, training=training, **kwargs) layer_activations.append(x) pnltimt = x logits = self.final_dense(x, training=None, **kwargs) return logits, pnltimt, layer_activations
3,116
36.107143
92
py
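A minimal instantiation sketch for VanillaFF above; it is not part of the original file, the hyperparameter values are arbitrary placeholders, the SimpleNamespace stands in for the repo's hparams object, and the import assumes the Reflect repo root is on PYTHONPATH:

from types import SimpleNamespace

import tensorflow as tf

from tf2_models.ff import VanillaFF

hparams = SimpleNamespace(hidden_dim=[256, 128], depth=2,
                          hidden_dropout_rate=0.25, input_dropout_rate=0.1,
                          output_dim=10)
model = VanillaFF(hparams)

x = tf.random.normal((4, 28, 28, 1))      # e.g. an MNIST-shaped toy batch
logits = model(x, training=False)         # (4, 10)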
Reflect
Reflect-master/tf2_models/common_layers.py
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest


def gelu(x):
  """Gaussian Error Linear Unit.
  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415
  Args:
    x: float Tensor to perform activation.
  Returns:
    `x` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
    (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf


def shape_list(x):
  """Deal with dynamic shape in tensorflow cleanly."""
  static = x.shape.as_list()
  dynamic = tf.shape(x)
  return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def get_initializer(initializer_range=0.02):
  """Creates a `tf.initializers.truncated_normal` with the given range.
  Args:
    initializer_range: float, initializer range for stddev.
  Returns:
    TruncatedNormal initializer with stddev = `initializer_range`.
  """
  return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


def make_variable_state_initializer(**kwargs):
  def variable_state_initializer(shape, batch_size, dtype, index):
    args = kwargs.copy()

    if args.get('name'):
      args['name'] = args['name'] + '_' + str(index)
    else:
      args['name'] = 'init_state_' + str(index)

    args['shape'] = shape
    args['dtype'] = dtype
    var = tf.compat.v1.get_variable(**args)
    var = tf.expand_dims(var, 0)
    var = tf.tile(var, tf.stack([batch_size] + [1] * len(shape)))
    var.set_shape([None] + tensor_shape.as_shape(shape).as_list())
    return var

  return variable_state_initializer


def get_initial_cell_state(cell, initializer, batch_size, dtype):
  """Return state tensor(s), initialized with initializer.
  Args:
    cell: RNNCell.
    initializer: function taking (shape, batch_size, dtype, index)
      that determines how the state is initialized.
    batch_size: int, float, or unit Tensor representing the batch size.
    dtype: the data type to use for the state.
  Returns:
    If `state_size` is an int or TensorShape, then the return value is a
    `N-D` tensor of shape `[batch_size x state_size]` initialized
    according to the initializer.
    If `state_size` is a nested list or tuple, then the return value is
    a nested list or tuple (of the same structure) of `2-D` tensors with
    the shapes `[batch_size x s]` for each s in `state_size`.
  """
  state_size = cell.state_size
  if nest.is_sequence(state_size):
    state_size_flat = nest.flatten(state_size)
    init_state_flat = [
      initializer(s, batch_size, dtype, i)
      for i, s in enumerate(state_size_flat)]
    init_state = nest.pack_sequence_as(structure=state_size,
                                       flat_sequence=init_state_flat)
  else:
    init_state_size = state_size
    init_state = initializer(init_state_size, batch_size, dtype, None)

  return init_state


def _generate_variable_state(batch_size_tensor, state_size, dtype):
  """Generate a variable tensor with shape [batch_size, state_size]."""

  def create_variable(unnested_state_size):
    flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
    init_state_size = [batch_size_tensor] + flat_dims
    return tf.Variable(init_state_size, dtype=dtype)

  if nest.is_sequence(state_size):
    return nest.map_structure(create_variable, state_size)
  else:
    return create_variable(state_size)
3,398
34.041237
72
py
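A quick numeric check (not from the original file) of the tanh-approximated GELU and of shape_list defined above, assuming the Reflect repo root is importable:

import tensorflow as tf

from tf2_models.common_layers import gelu, shape_list

x = tf.constant([-1.0, 0.0, 1.0])
print(gelu(x).numpy())      # approximately [-0.1588, 0.0, 0.8412]

# shape_list returns plain ints for static dimensions and tensors for dynamic ones.
t = tf.zeros((2, 3, 5))
print(shape_list(t))        # [2, 3, 5]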
Reflect
Reflect-master/tf2_models/lm_lstm.py
import absl import tensorflow as tf import numpy as np from tensorboard.compat.tensorflow_stub import tensor_shape from tensorflow.python.util import nest from tf2_models.common_layers import get_initializer from tf2_models.embedding import SharedEmbeddings from tf2_models.utils import create_init_var class LmLSTM(tf.keras.Model): def __init__(self, hparams, scope="lm_lstm",*inputs, **kwargs): del kwargs['cl_token'] super(LmLSTM, self).__init__(*inputs, **kwargs) self.hparams = hparams self.scope = scope self.rep_index = 2 self.rep_layer = -1 self.model_name = '_'.join([self.scope, 'em-'+str(self.hparams.embedding_dim), 'h-'+str(self.hparams.hidden_dim), 'd-'+str(self.hparams.depth), 'hdrop-'+str(self.hparams.hidden_dropout_rate), 'indrop-'+str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.00001) self.create_vars() @tf.function def create_vars(self): self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim, output_dim=self.hparams.embedding_dim, input_shape=(None, None), mask_zero=True, embeddings_regularizer=self.regularizer, name='input_embedding') self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate) self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, name='output_projection') self.stacked_rnns = [] for _ in np.arange(self.hparams.depth): self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim, return_sequences=True, return_state=True, go_backwards=False, stateful=False, unroll=False, time_major=False, recurrent_dropout=self.hparams.hidden_dropout_rate, dropout=self.hparams.hidden_dropout_rate, kernel_regularizer=self.regularizer, recurrent_regularizer=self.regularizer, bias_regularizer=self.regularizer, )) @tf.function(experimental_relax_shapes=True) def call(self, inputs, **kwargs): if 'training' in kwargs: training = kwargs['training'] else: training = False embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training) rnn_outputs = embedded_input input_mask = self.input_embedding.compute_mask(inputs) float_input_mask = tf.cast(input_mask, dtype=tf.float32) for i in np.arange(self.hparams.depth): rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training) rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training) logits = self.output_embedding(rnn_outputs) logits = logits * float_input_mask[...,None] + tf.eye(self.hparams.output_dim)[0] * (1 - float_input_mask[...,None]) return logits class ClassifierLSTM(tf.keras.Model): def __init__(self, hparams, scope="cl_lstm", *inputs, **kwargs): del kwargs['cl_token'] super(ClassifierLSTM, self).__init__(*inputs, **kwargs) self.hparams = hparams self.scope = scope self.rep_index = 2 self.rep_layer = -1 self.model_name = '_'.join([self.scope, 'em-'+str(self.hparams.embedding_dim), 'h-'+str(self.hparams.hidden_dim), 'd-'+str(self.hparams.depth), 'hdrop-'+str(self.hparams.hidden_dropout_rate), 'indrop-'+str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.00001) self.create_vars() @tf.function def create_vars(self): self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim, output_dim=self.hparams.embedding_dim, input_shape=(None, None), 
mask_zero=True, embeddings_regularizer=self.regularizer, name='input_embedding') self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate) self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, name='output_projection') self.stacked_rnns = [] for _ in np.arange(self.hparams.depth): self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim, return_sequences=True, return_state=True, go_backwards=False, stateful=False, unroll=False, time_major=False, recurrent_dropout=self.hparams.hidden_dropout_rate, dropout=self.hparams.hidden_dropout_rate, kernel_regularizer=self.regularizer, recurrent_regularizer=self.regularizer, bias_regularizer=self.regularizer, )) def call(self, inputs, **kwargs): if 'training' in kwargs: training = kwargs['training'] else: training = False @tf.function(experimental_relax_shapes=True) def _call(inputs, training): embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training) rnn_outputs = embedded_input input_mask = self.input_embedding.compute_mask(inputs) inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) for i in np.arange(self.hparams.depth): rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training) rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training) batch_size = tf.shape(rnn_outputs)[0] bach_indices = tf.expand_dims(tf.range(batch_size), 1) final_indexes = tf.concat([bach_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1) final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes) logits = self.output_embedding(final_rnn_outputs) return logits return _call(inputs, training) #@tf.function(experimental_relax_shapes=True) def detailed_call(self, inputs, **kwargs): if 'training' in kwargs: training = kwargs['training'] else: training = False @tf.function(experimental_relax_shapes=True) def _call(inputs, training): embedded_input = self.input_embedding_dropout(self.input_embedding(inputs), training=training) rnn_outputs = embedded_input input_mask = self.input_embedding.compute_mask(inputs) inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) hidden_activation = [embedded_input] for i in np.arange(self.hparams.depth): rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training) hidden_activation.append(rnn_outputs) rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training) batch_size = tf.shape(rnn_outputs)[0] bach_indices = tf.expand_dims(tf.range(batch_size), 1) final_indexes = tf.concat([bach_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1) final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes) logits = self.output_embedding(final_rnn_outputs) out = logits out = (out, final_rnn_outputs, hidden_activation) return out return _call(inputs, training) class LmLSTMSharedEmb(tf.keras.Model): def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs): del kwargs['cl_token'] super(LmLSTMSharedEmb, self).__init__() self.rep_index = 3 self.rep_layer = -1 self.hparams = hparams self.scope = scope self.model_name = '_'.join([self.scope, 'em-'+str(self.hparams.embedding_dim), 'h-'+str(self.hparams.hidden_dim), 'd-'+str(self.hparams.depth), 
'hdrop-'+str(self.hparams.hidden_dropout_rate), 'indrop-'+str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0000) self.create_vars() def create_vars(self): @tf.function def _create_vars(): self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim, hidden_size=self.hparams.embedding_dim, initializer_range=self.hparams.initializer_range, regularizer=self.regularizer, name='embedding') self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate) self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim, kernel_initializer=get_initializer(initializer_range)) self.stacked_rnns = [] self.rnn_initial_states = [] for _ in np.arange(self.hparams.depth): initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim, return_sequences=True, return_state=True, go_backwards=False, stateful=False, unroll=False, time_major=False, recurrent_dropout=self.hparams.hidden_dropout_rate, dropout=self.hparams.hidden_dropout_rate, kernel_regularizer=self.regularizer, recurrent_regularizer=self.regularizer, bias_regularizer=self.regularizer, kernel_initializer=get_initializer(initializer_range), recurrent_initializer=get_initializer(initializer_range) )) _create_vars() initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range for i in np.arange(self.hparams.depth): state_size = self.stacked_rnns[i].cell.state_size if nest.is_sequence(state_size): init_state = nest.map_structure(lambda x: create_init_var(x, i, initializer_range), state_size) else: init_state = create_init_var(state_size, i, initializer_range) self.rnn_initial_states.append(init_state) def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs): @tf.function(experimental_relax_shapes=True) def _call(inputs, padding_symbol, **kwargs): input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool) embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'), **kwargs) rnn_outputs = embedded_input for i in np.arange(self.hparams.depth): batch_size_tensor = tf.shape(rnn_outputs)[0] absl.logging.info(self.rnn_initial_states[i]) def tile_init(unnested_init_state): return tf.tile(unnested_init_state, (batch_size_tensor, 1)) init_state = self.rnn_initial_states[i] if nest.is_sequence(init_state): init_for_batch = nest.map_structure(tile_init, init_state) else: init_for_batch = tile_init(init_state) rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, initial_state=init_for_batch, **kwargs) rnn_outputs = self.output_projection(rnn_outputs, **kwargs) rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs) logits = self.input_embedding(rnn_outputs, mode='linear') return logits return _call(inputs, padding_symbol, **kwargs) @tf.function(experimental_relax_shapes=True) def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs): @tf.function(experimental_relax_shapes=True) def _call(inputs, padding_symbol, **kwargs): input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool) embedded_input = 
self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'), **kwargs) rnn_outputs = embedded_input hidden_activation = [embedded_input] for i in np.arange(self.hparams.depth): batch_size_tensor = tf.shape(rnn_outputs)[0] absl.logging.info(self.rnn_initial_states[i]) def tile_init(unnested_init_state): return tf.tile(unnested_init_state, (batch_size_tensor, 1)) init_state = self.rnn_initial_states[i] if nest.is_sequence(init_state): init_for_batch = nest.map_structure(tile_init, init_state) else: init_for_batch = tile_init(init_state) rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, initial_state=init_for_batch, **kwargs) hidden_activation.append(rnn_outputs) rnn_outputs = self.output_projection(rnn_outputs, **kwargs) rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs) inputs_lengths = tf.reduce_sum(input_mask, axis=-1) - 1 batch_indices = tf.range(batch_size_tensor) indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1) sentence_rep = tf.gather_nd(rnn_outputs, indices) logits = self.input_embedding(rnn_outputs, mode='linear') out = logits out = (out,rnn_outputs, sentence_rep, hidden_activation) return out return _call(inputs, padding_symbol, **kwargs) class LmLSTMSharedEmbV2(tf.keras.Model): def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs): del kwargs['cl_token'] super(LmLSTMSharedEmbV2, self).__init__() self.rep_index = 3 self.rep_layer = -1 self.hparams = hparams self.scope = scope self.model_name = '_'.join([self.scope, 'em-'+str(self.hparams.embedding_dim), 'h-'+str(self.hparams.hidden_dim), 'd-'+str(self.hparams.depth), 'hdrop-'+str(self.hparams.hidden_dropout_rate), 'indrop-'+str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0000) self.create_vars() @tf.function def create_vars(self): self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim, hidden_size=self.hparams.embedding_dim, initializer_range=self.hparams.initializer_range, regularizer=self.regularizer, name='embedding') self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate) self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim, kernel_initializer=get_initializer(initializer_range)) self.stacked_rnns = [] self.rnn_initial_states = [] for _ in np.arange(self.hparams.depth): initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim, return_sequences=True, return_state=True, go_backwards=False, stateful=False, unroll=False, time_major=False, recurrent_dropout=self.hparams.hidden_dropout_rate, dropout=self.hparams.hidden_dropout_rate, kernel_regularizer=self.regularizer, recurrent_regularizer=self.regularizer, bias_regularizer=self.regularizer, kernel_initializer=get_initializer(initializer_range), recurrent_initializer=get_initializer(initializer_range) )) def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs): @tf.function(experimental_relax_shapes=True) def _call(inputs, padding_symbol, **kwargs): input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool) embedded_input = 
self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'), **kwargs) rnn_outputs = embedded_input for i in np.arange(self.hparams.depth): rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, **kwargs) rnn_outputs = self.output_projection(rnn_outputs, **kwargs) rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs) logits = self.input_embedding(rnn_outputs, mode='linear') return logits return _call(inputs, padding_symbol, **kwargs) @tf.function(experimental_relax_shapes=True) def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs): @tf.function(experimental_relax_shapes=True) def _call(inputs, padding_symbol, **kwargs): input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool) embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'), **kwargs) rnn_outputs = embedded_input hidden_activation = [embedded_input] for i in np.arange(self.hparams.depth): rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, **kwargs) hidden_activation.append(rnn_outputs) rnn_outputs = self.output_projection(rnn_outputs, **kwargs) rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs) batch_size_tensor = tf.shape(rnn_outputs)[0] inputs_lengths = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) - 1 batch_indices = tf.range(batch_size_tensor) indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1) sentence_rep = tf.gather_nd(rnn_outputs, indices) logits = self.input_embedding(rnn_outputs, mode='linear') out = logits out = (out, rnn_outputs, sentence_rep, hidden_activation) return out return _call(inputs, padding_symbol, **kwargs) if __name__ == '__main__': class hparams(object): hidden_dim=8 input_dim=4 output_dim=4 depth=2 hidden_dropout_rate=0.1 lm_lstm = LmLSTM(hparams=hparams) inputs = np.int64(np.flip(np.sort(np.random.uniform(0,3,size=(2,5))))) inputs_mask = tf.equal(inputs, 0) print(inputs_mask) lm_lstm.build(input_shape=(None,None)) lm_lstm.summary() print(inputs) print(lm_lstm(inputs))
23,117
47.364017
138
py
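A toy sketch (not from the original file) of the logits masking performed in LmLSTM.call above: positions whose input token is the padding id 0 get a fixed one-hot logit vector for index 0, independent of the network output:

import tensorflow as tf

output_dim = 6
logits = tf.random.normal((2, 4, output_dim))          # [batch, seq, vocab]
inputs = tf.constant([[4, 2, 0, 0],
                      [1, 5, 3, 0]], dtype=tf.int64)   # 0 = padding

float_input_mask = tf.cast(inputs != 0, dtype=tf.float32)
masked_logits = logits * float_input_mask[..., None] + \
                tf.eye(output_dim)[0] * (1 - float_input_mask[..., None])
# Real positions keep their logits; padded positions become [1, 0, 0, 0, 0, 0].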
Reflect
Reflect-master/tf2_models/transformers.py
import tensorflow as tf from tf2_models.common_layers import get_initializer, shape_list from tf2_models.embedding import SharedEmbeddings from tf2_models.transformer_layers import Block class GPT2(tf.keras.layers.Layer): def __init__(self, hparams, *inputs, **kwargs): super(GPT2, self).__init__(hparams, *inputs, **kwargs) self.output_hidden_states = hparams.output_hidden_states self.output_attentions = hparams.output_attentions self.output_embeddings = hparams.output_embeddings self.num_hidden_layers = hparams.depth self.vocab_size = hparams.vocab_size self.embedding_dim = hparams.embedding_dim self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0001) self.create_vars(hparams) @tf.function def create_vars(self, hparams): self.wte = SharedEmbeddings(self.vocab_size, hparams.hidden_size, initializer_range=hparams.initializer_range, regularizer=self.regularizer, name='wte') self.wpe = tf.keras.layers.Embedding(hparams.n_positions, hparams.embedding_dim, embeddings_initializer=get_initializer(hparams.initializer_range), embeddings_regularizer=self.regularizer, name='wpe') self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop) self.h = [Block(hparams.n_ctx, hparams, regularizer=self.regularizer, scale=True, name='h_._{}'.format(i)) for i in range(hparams.depth)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f') def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, training=False): @tf.function(experimental_relax_shapes=True) def _call(inputs, past, attention_mask, token_type_ids, position_ids, training): if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis], dtype=tf.float32) if attention_mask is None: attention_mask = padding_mask else: attention_mask = attention_mask*padding_mask input_shape = shape_list(inputs) input_ids = tf.reshape(inputs, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode='embedding') else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, layer_past, attention_mask], training=training) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = 
all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) if self.output_embeddings: outputs = outputs + (inputs_embeds,) return outputs # last hidden state, presents, (all hidden_states), (attentions) return _call(inputs, past, attention_mask, token_type_ids, position_ids, training) class GPT2SharedWeights(GPT2): def __init__(self, hparams, *inputs, **kwargs): super(GPT2SharedWeights, self).__init__(hparams, *inputs, **kwargs) @tf.function def create_vars(self, hparams): self.wte = SharedEmbeddings(self.vocab_size , hparams.hidden_size, initializer_range=hparams.initializer_range, regularizer=self.regularizer, name='wte') self.wpe = tf.keras.layers.Embedding(hparams.n_positions, hparams.embedding_dim, embeddings_initializer=get_initializer(hparams.initializer_range), embeddings_regularizer=self.regularizer, name='wpe') self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop) attention_block = Block(hparams.n_ctx, hparams, regularizer=self.regularizer, scale=True, name='h') self.h = [attention_block for i in range(hparams.depth)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f') class Bert(tf.keras.layers.Layer): def __init__(self, hparams, *inputs, **kwargs): super(Bert, self).__init__(hparams, *inputs, **kwargs) self.output_hidden_states = hparams.output_hidden_states self.output_attentions = hparams.output_attentions self.output_embeddings = hparams.output_embeddings self.num_hidden_layers = hparams.depth self.vocab_size = hparams.vocab_size self.embedding_dim = hparams.embedding_dim self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0001) self.create_vars(hparams) @tf.function def create_vars(self, hparams): self.wte = SharedEmbeddings(self.vocab_size, hparams.hidden_size, initializer_range=hparams.initializer_range, regularizer=self.regularizer, name='wte') self.wpe = tf.keras.layers.Embedding(hparams.n_positions, hparams.embedding_dim, embeddings_initializer=get_initializer(hparams.initializer_range), embeddings_regularizer=self.regularizer, name='wpe') self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop) self.h = [Block(hparams.n_ctx, hparams, regularizer=self.regularizer, scale=True, casual_masking=False, name='h_._{}'.format(i)) for i in range(hparams.depth)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f') def get_input_embeddings(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, training=False): if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] input_shape = shape_list(inputs) input_ids = tf.reshape(inputs, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, 
mode='embedding') else: token_type_embeds = 0 input_embeddings = inputs_embeds + position_embeds + token_type_embeds padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:, tf.newaxis, :, tf.newaxis], dtype=tf.float32) return input_embeddings, input_shape, padding_mask, past def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past, attention_mask=None, training=False): @tf.function(experimental_relax_shapes=True) def _call(input_embeddings, input_shape, padding_mask, past, attention_mask, training): if attention_mask is not None: attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 if attention_mask is None: attention_mask = padding_mask else: attention_mask = attention_mask*padding_mask hidden_states = input_embeddings hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, layer_past, attention_mask], training=training) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) if self.output_embeddings: outputs = outputs return outputs # last hidden state, presents, (all hidden_states), (attentions), input_embedding return _call(input_embeddings, input_shape, padding_mask, past, attention_mask, training) def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, training=False): @tf.function(experimental_relax_shapes=True) def _call(inputs, past, attention_mask, token_type_ids, position_ids, training): if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis], dtype=tf.float32) if attention_mask is None: attention_mask = padding_mask else: attention_mask = attention_mask*padding_mask input_shape = shape_list(inputs) input_ids = tf.reshape(inputs, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, 
shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode='embedding') else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, layer_past, attention_mask], training=training) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) if self.output_embeddings: outputs = outputs + (inputs_embeds,) return outputs # last hidden state, presents, (all hidden_states), (attentions), input_embedding return _call(inputs, past, attention_mask, token_type_ids, position_ids, training) class BertSharedWeights(Bert): def __init__(self, hparams, *inputs, **kwargs): super(BertSharedWeights, self).__init__(hparams, *inputs, **kwargs) self.output_hidden_states = hparams.output_hidden_states self.output_attentions = hparams.output_attentions self.output_embeddings = hparams.output_embeddings self.num_hidden_layers = hparams.depth self.vocab_size = hparams.vocab_size self.embedding_dim = hparams.embedding_dim self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.0001) self.create_vars(hparams) @tf.function def create_vars(self, hparams): self.wte = SharedEmbeddings(self.vocab_size, hparams.hidden_size, initializer_range=hparams.initializer_range, regularizer=self.regularizer, name='wte') self.wpe = tf.keras.layers.Embedding(hparams.n_positions, hparams.embedding_dim, embeddings_initializer=get_initializer(hparams.initializer_range), embeddings_regularizer=self.regularizer, name='wpe') self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop) attention_block = Block(hparams.n_ctx, hparams, regularizer=self.regularizer, scale=True, casual_masking=False, name='h') self.h = [attention_block for i in range(hparams.depth)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
16,938
40.619165
113
py
Reflect
Reflect-master/tf2_models/resnet.py
import tensorflow as tf class ResnetBlock(tf.keras.layers.Layer): def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs): super(ResnetBlock, self).__init__(*inputs, **kwargs) self.filters = filters self.kernel_size = kernel_size self.activation = activation self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.000000002) self.create_layer() def create_layer(self): self.conv1 = tf.keras.layers.Conv2D(self.filters, self.kernel_size, activation=self.activation, padding='same', kernel_regularizer=self.regularizer) self.batch_norm1 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Conv2D(self.filters, self.kernel_size, activation=None, padding='same', kernel_regularizer=self.regularizer) self.batch_norm2 = tf.keras.layers.BatchNormalization() self.add = tf.keras.layers.Add() self.activation = tf.keras.layers.Activation('relu') def call(self, inputs, training=None, **kwargs): outputs = self.conv1(inputs, training=training, **kwargs) outputs = self.batch_norm1(outputs,training=training, **kwargs) outputs = self.conv2(outputs, training=training, **kwargs) outputs = self.batch_norm2(outputs,training=training, **kwargs) outputs = self.add([outputs, inputs],training=training, **kwargs) outputs = self.activation(outputs, training=training, **kwargs) return outputs class Resnet(tf.keras.Model): def __init__(self, hparams, scope='resnet', *inputs, **kwargs): if 'cl_token' in kwargs: del kwargs['cl_token'] super(Resnet, self).__init__(name=scope, *inputs, **kwargs) self.scope = scope self.hparams = hparams self.model_name = '_'.join([self.scope, 'h-' + str(self.hparams.hidden_dim), 'rd-' + str(self.hparams.num_res_net_blocks), 'hdrop-' + str(self.hparams.hidden_dropout_rate), 'indrop-' + str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.000000002) self.create_layers() self.rep_index = 1 self.rep_layer = -1 def create_layers(self): self.batch_norm1 = tf.keras.layers.BatchNormalization() self.activation = tf.keras.layers.Activation('relu') self.conv1 = tf.keras.layers.Conv2D(self.hparams.filters[0], self.hparams.kernel_size[0], activation=None, kernel_regularizer=self.regularizer) self.batch_norm2 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Conv2D(self.hparams.filters[1], self.hparams.kernel_size[1], activation=None, kernel_regularizer=self.regularizer) self.batch_norm3 = tf.keras.layers.BatchNormalization() self.pool2 = tf.keras.layers.MaxPooling2D(self.hparams.pool_size) self.resblocks = [] for i in range(self.hparams.num_res_net_blocks): self.resblocks.append(ResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2])) self.conv4 = tf.keras.layers.Conv2D(self.hparams.filters[3], self.hparams.kernel_size[3], activation=None) self.batch_norm4 = tf.keras.layers.BatchNormalization() self.avgpool = tf.keras.layers.GlobalAveragePooling2D() self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu') self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None) def call(self, inputs, padding_symbol=None, training=None, **kwargs): x = inputs #self.batch_norm1(inputs, training=training, **kwargs) x = self.conv1(x, training=training, **kwargs) x = self.batch_norm2(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) x = self.conv2(x, training=training, **kwargs) x = self.batch_norm3(x, training=training, **kwargs) x = 
self.activation(x) x = self.dropout(x, training=training, **kwargs) x = self.pool2(x, training=training, **kwargs) for i in range(self.hparams.num_res_net_blocks): x = self.resblocks[i](x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) x = self.conv4(x, training=training, **kwargs) x = self.batch_norm4(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) x = self.avgpool(x, training=training, **kwargs) x = self.dense(x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) outputs = self.project(x, training=training, **kwargs) return outputs def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs): self.layer_activations = [] x = self.batch_norm1(inputs, training=training, **kwargs) x = self.conv1(x, training=training, **kwargs) x = self.batch_norm2(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.conv2(x, training=training, **kwargs) x = self.batch_norm3(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.pool2(x, training=training, **kwargs) for i in range(self.hparams.num_res_net_blocks): x = self.resblocks[i](x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.conv4(x, training=training, **kwargs) x = self.batch_norm4(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.avgpool(x, training=training, **kwargs) x = self.dense(x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) pnltimt = x outputs = self.project(x, training=training, **kwargs) return outputs, pnltimt, self.layer_activations
6,572
40.601266
94
py
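Below is a minimal, hypothetical forward-pass sketch for the Resnet model defined in the resnet.py entry above. The hparams namespace and all of its values are assumptions for illustration (any object exposing the attributes read in create_layers works); filters[1] must equal filters[2] so the residual addition inside ResnetBlock is shape-compatible.

# Hypothetical usage sketch (not part of the repo); all hparams values are assumptions.
import tensorflow as tf
from types import SimpleNamespace
from tf2_models.resnet import Resnet

hparams = SimpleNamespace(
    hidden_dim=128, output_dim=10, num_res_net_blocks=2,
    hidden_dropout_rate=0.1, input_dropout_rate=0.0,
    filters=[32, 64, 64, 64],      # filters[1] == filters[2] keeps the residual add shape-compatible
    kernel_size=[3, 3, 3, 3],
    pool_size=2)

model = Resnet(hparams, scope='resnet')
images = tf.random.uniform((4, 28, 28, 1))        # an MNIST-shaped batch of placeholder pixels
logits = model(images, training=False)            # -> shape (4, 10)
print(model.model_name, logits.shape)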
Reflect
Reflect-master/tf2_models/cnn.py
import tensorflow as tf import numpy as np def max_out(inputs, num_units, axis=None): shape = inputs.get_shape().as_list() if shape[0] is None: shape[0] = -1 if axis is None: # Assume that channel is the last dimension axis = -1 num_channels = shape[axis] if num_channels % num_units: raise ValueError('number of features({}) is not ' 'a multiple of num_units({})'.format(num_channels, num_units)) shape[axis] = num_units shape += [num_channels // num_units] outputs = tf.reduce_max(tf.reshape(inputs, shape), -1) return outputs class VanillaCNN(tf.keras.models.Model): def __init__(self, hparams, scope="cl_vcnn", *inputs, **kwargs): if 'cl_token' in kwargs: del kwargs['cl_token'] super(VanillaCNN, self).__init__(*inputs, **kwargs) self.hparams = hparams self.scope = scope self.model_name = '_'.join([self.scope, 'hc-' + '.'.join( [str(h) for h in self.hparams.filters]), 'hfc-' + '.'.join( [str(h) for h in self.hparams.fc_dim]), 'd-' + str(self.hparams.depth), 'hdrop-' + str( self.hparams.hidden_dropout_rate), 'indrop-' + str( self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.000000002) self.create_vars() self.rep_index = 1 self.rep_layer = -1 def create_vars(self): self.indrop = tf.keras.layers.Dropout(rate=self.hparams.input_dropout_rate) self.cnns = [] self.cnn_nns = [] self.cnn_bnz = [] self.cnn_activations = [] self.cnn_pooling = [] self.cnn_dropouts = [] for i in np.arange(self.hparams.depth): self.cnns.append(tf.keras.layers.Conv2D(self.hparams.filters[i], self.hparams.kernel_size[i], activation=None, kernel_regularizer=self.regularizer)) # if self.hparams.maxout_size[i] < self.hparams.filters[i]: # nn_size = int(self.hparams.filters[i] / self.hparams.maxout_size[i]) # self.cnn_nns.append(tf.keras.layers.Conv2D(self.hparams.maxout_size[i], # (1,1), # activation=None, # kernel_regularizer=self.regularizer)) # else: # self.cnn_nns.append(tf.keras.layers.Lambda(lambda x: x)) self.cnn_bnz.append(tf.keras.layers.BatchNormalization()) self.cnn_activations.append(tf.keras.layers.Activation('relu')) self.cnn_pooling.append( tf.keras.layers.MaxPooling2D(self.hparams.pool_size[i])) self.cnn_dropouts.append( tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate)) self.avg_pool = tf.keras.layers.GlobalAveragePooling2D() self.densez = [] self.dense_bnz = [] self.dense_activations = [] self.dense_dropouts = [] for i in np.arange(self.hparams.proj_depth): self.densez.append( tf.keras.layers.Dense(self.hparams.fc_dim[i], activation=None, kernel_regularizer=self.regularizer)) self.dense_bnz.append(tf.keras.layers.BatchNormalization()) self.dense_activations.append(tf.keras.layers.Activation('relu')) self.dense_dropouts.append( tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate)) self.projector = tf.keras.layers.Dense(self.hparams.output_dim, kernel_regularizer=self.regularizer) def call(self, inputs, padding_symbol=None, training=None, **kwargs): x = self.indrop(inputs, training=training, **kwargs) for i in np.arange(self.hparams.depth): x = self.cnns[i](x, training=training, **kwargs) # x = self.cnn_nns[i](x, training=training, **kwargs) x = max_out(x, self.hparams.maxout_size[i]) x = self.cnn_bnz[i](x, training=training, **kwargs) x = self.cnn_activations[i](x, training=training, **kwargs) x = self.cnn_pooling[i](x, training=training, **kwargs) x = self.cnn_dropouts[i](x, training=training, **kwargs) x = self.avg_pool(x, **kwargs) for i in np.arange(self.hparams.proj_depth): x = self.densez[i](x, training=training, **kwargs) x = 
self.dense_bnz[i](x, training=training, **kwargs) x = self.dense_activations[i](x, training=training, **kwargs) x = self.dense_dropouts[i](x, training=training, **kwargs) logits = self.projector(x, training=training, **kwargs) return logits def detailed_call(self, inputs, padding_symbol=None, **kwargs): x = self.indrop(inputs) hidden_activations = [] for i in np.arange(self.hparams.depth): x = self.cnns[i](x, **kwargs) x = max_out(x, self.hparams.maxout_size[i]) x = self.cnn_bnz[i](x, **kwargs) x = self.cnn_activations[i](x, **kwargs) x = self.cnn_pooling[i](x, **kwargs) x = self.cnn_dropouts[i](x, **kwargs) hidden_activations.append(x) x = self.avg_pool(x, **kwargs) hidden_activations.append(x) for i in np.arange(self.hparams.proj_depth): x = self.densez[i](x, **kwargs) x = self.dense_bnz[i](x, **kwargs) x = self.dense_activations[i](x, **kwargs) x = self.dense_dropouts[i](x, **kwargs) hidden_activations.append(x) logits = self.projector(x, **kwargs) return logits, hidden_activations[-1], hidden_activations
5,878
38.993197
91
py
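A quick, illustrative check of the max_out helper in the cnn.py entry above: it splits the channel axis into num_units groups and keeps the maximum of each group, so the channel count must be divisible by num_units.

# Illustrative check of max_out; the input values are arbitrary.
import tensorflow as tf
from tf2_models.cnn import max_out

x = tf.random.normal((2, 4, 4, 8))     # 8 channels
y = max_out(x, num_units=4)            # channels grouped into 4 groups of 2, max over each group
print(y.shape)                         # (2, 4, 4, 4)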
Reflect
Reflect-master/tf2_models/utils.py
import tensorflow as tf
import re

from tensorboard.compat.tensorflow_stub import tensor_shape


def camel2snake(name):
  return name[0].lower() + re.sub(r'(?!^)[A-Z]', lambda x: '_' + x.group(0).lower(), name[1:])


def log_summary(log_value, log_name, summary_scope):
  """Produce scalar summaries."""
  with tf.compat.v2.summary.experimental.summary_scope(summary_scope):
    tf.summary.scalar(log_name, log_value)


def create_init_var(unnested_state_size, i, initializer_range):
  flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
  init_state_size = [1] + flat_dims
  return tf.Variable(shape=init_state_size,
                     dtype=tf.float32,
                     initial_value=tf.keras.initializers.TruncatedNormal(
                         stddev=initializer_range)(shape=init_state_size),
                     trainable=True,
                     name="lstm_init_" + str(i))
884
31.777778
99
py
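Self-contained, illustrative calls to the helpers above; the expected outputs noted in the comments follow directly from the implementations.

# Illustrative usage of the helpers in tf2_models/utils.py.
import tensorflow as tf
from tf2_models.utils import camel2snake, create_init_var

print(camel2snake('ExponentialDecayWithWarmpUp'))    # exponential_decay_with_warmp_up

init_state = create_init_var(unnested_state_size=[16], i=0, initializer_range=0.02)
print(init_state.name, init_state.shape)             # lstm_init_0:0 (1, 16)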
Reflect
Reflect-master/tf2_models/train_utils.py
import absl import tensorflow as tf from tensorflow.python.framework import ops from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import LearningRateSchedule from tensorflow.python.ops import math_ops from tensorflow.python.util.tf_export import keras_export from tensorflow_addons.utils import keras_utils @keras_export("keras.optimizers.schedules.ExponentialDecay") class ExponentialDecayWithWarmpUp(LearningRateSchedule): """A LearningRateSchedule that uses an exponential decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, decay_rate, warmup_steps, warmup_learning_rate=0.0, hold_base_rate_steps=0, staircase=False, name=None): """Applies exponential decay to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies an exponential decay function to an optimizer step, given a provided initial learning rate. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): return initial_learning_rate * decay_rate ^ (step / decay_steps) ``` If the argument `staircase` is `True`, then `step / decay_steps` is an integer division and the decayed learning rate follows a staircase function. You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: When fitting a Keras model, decay every 100000 steps with a base of 0.96: ```python initial_learning_rate = 0.1 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True) model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The decay rate. staircase: Boolean. If `True` decay the learning rate at discrete intervals name: String. Optional name of the operation. Defaults to 'ExponentialDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. 
""" super(ExponentialDecayWithWarmpUp, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.decay_rate = decay_rate self.warmup_steps = warmup_steps self.warmup_learning_rate = warmup_learning_rate self.hold_base_rate_steps = hold_base_rate_steps self.staircase = staircase self.name = name @tf.function(experimental_relax_shapes=True) def __call__(self, step): with ops.name_scope_v2(self.name or "ExponentialDecay") as name: initial_learning_rate = tf.constant( self.initial_learning_rate, name="initial_learning_rate", dtype=tf.float32) warmup_learning_rate = tf.constant( self.warmup_learning_rate, name="warmup_learning_rate", dtype=tf.float32) dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) decay_rate = math_ops.cast(self.decay_rate, dtype) warmup_steps = math_ops.cast(self.warmup_steps, dtype) hold_base_rate_steps = math_ops.cast(self.hold_base_rate_steps, dtype) global_step_recomp = math_ops.cast(step, dtype) p = (global_step_recomp - (warmup_steps+hold_base_rate_steps)) / (decay_steps) if self.staircase: p = math_ops.floor(p) learning_rate= math_ops.multiply( initial_learning_rate, math_ops.pow(decay_rate, p), name=name) learning_rate = tf.where( global_step_recomp > (warmup_steps + hold_base_rate_steps), learning_rate, initial_learning_rate) if self.warmup_steps > 0: if self.initial_learning_rate < self.warmup_learning_rate: raise ValueError('learning_rate_base must be larger or equal to ' 'warmup_learning_rate.') slope = (initial_learning_rate - warmup_learning_rate) / warmup_steps warmup_rate = slope * tf.cast(global_step_recomp, tf.float32) + warmup_learning_rate learning_rate = tf.where(global_step_recomp < warmup_steps, warmup_rate, learning_rate) return learning_rate def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "decay_rate": self.decay_rate, "staircase": self.staircase, "warmup_steps": self.warmup_steps, "warmup_learning_rate": self.warmup_learning_rate, "hold_base_rate_steps": self.hold_base_rate_steps, "name": self.name } class RectifiedAdam(tf.keras.optimizers.Optimizer): """Variant of the Adam optimizer whose adaptive learning rate is rectified so as to have a consistent variance. It implements the Rectified Adam (a.k.a. RAdam) proposed by Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf). Example of usage: ```python opt = tfa.optimizers.RectifiedAdam(lr=1e-3) ``` Note: `amsgrad` is not described in the original paper. Use it with caution. RAdam is not a placement of the heuristic warmup, the settings should be kept if warmup has already been employed and tuned in the baseline method. You can enable warmup by setting `total_steps` and `warmup_proportion`: ```python opt = tfa.optimizers.RectifiedAdam( lr=1e-3, total_steps=10000, warmup_proportion=0.1, min_lr=1e-5, ) ``` In the above example, the learning rate will increase linearly from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr` in 9000 steps. Lookahead, proposed by Michael R. Zhang et.al in the paper [Lookahead Optimizer: k steps forward, 1 step back] (https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam, which is announced by Less Wright and the new combined optimizer can also be called "Ranger". The mechanism can be enabled by using the lookahead wrapper. 
For example: ```python radam = tfa.optimizers.RectifiedAdam() ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5) ``` """ def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, weight_decay=0., amsgrad=False, sma_threshold=5.0, total_steps=0, warmup_proportion=0.1, min_lr=0., name='RectifiedAdam', **kwargs): r"""Construct a new RAdam optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the 2nd moment estimates. epsilon: A small constant for numerical stability. weight_decay: A floating point value. Weight decay for each param. amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from the paper "On the Convergence of Adam and beyond". sma_threshold. A float value. The threshold for simple mean average. total_steps: An integer. Total number of training steps. Enable warmup by setting a positive value. warmup_proportion: A floating point value. The proportion of increasing steps. min_lr: A floating point value. Minimum learning rate after warmup. name: Optional name for the operations created when applying gradients. Defaults to "RectifiedAdam". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ super(RectifiedAdam, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self._set_hyper('decay', self._initial_decay) self._set_hyper('weight_decay', weight_decay) self._set_hyper('sma_threshold', sma_threshold) self._set_hyper('total_steps', float(total_steps)) self._set_hyper('warmup_proportion', warmup_proportion) self._set_hyper('min_lr', min_lr) self.epsilon = epsilon or tf.keras.backend.epsilon() self.amsgrad = amsgrad self._initial_weight_decay = weight_decay self._initial_total_steps = total_steps def _create_slots(self, var_list): for var in var_list: self.add_slot(var, 'm') for var in var_list: self.add_slot(var, 'v') if self.amsgrad: for var in var_list: self.add_slot(var, 'vhat') def set_weights(self, weights): params = self.weights num_vars = int((len(params) - 1) / 2) if len(weights) == 3 * num_vars + 1: weights = weights[:len(params)] super(RectifiedAdam, self).set_weights(weights) def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr(var_dtype) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype) local_step = tf.cast(self.iterations + 1, var_dtype) beta_1_power = tf.pow(beta_1_t, local_step) beta_2_power = tf.pow(beta_2_t, local_step) if self._initial_total_steps > 0: total_steps = self._get_hyper('total_steps', var_dtype) warmup_steps = total_steps *\ self._get_hyper('warmup_proportion', var_dtype) min_lr = self._get_hyper('min_lr', var_dtype) decay_steps = tf.maximum(total_steps - warmup_steps, 1) decay_rate = (min_lr - lr_t) / decay_steps lr_t = tf.where( local_step <= warmup_steps, lr_t * 
(local_step / warmup_steps), lr_t + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps), ) sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0 sma_t = sma_inf - 2.0 * local_step * beta_2_power / ( 1.0 - beta_2_power) m_t = m.assign( beta_1_t * m + (1.0 - beta_1_t) * grad, use_locking=self._use_locking) m_corr_t = m_t / (1.0 - beta_1_power) v_t = v.assign( beta_2_t * v + (1.0 - beta_2_t) * tf.square(grad), use_locking=self._use_locking) if self.amsgrad: vhat = self.get_slot(var, 'vhat') vhat_t = vhat.assign( tf.maximum(vhat, v_t), use_locking=self._use_locking) v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power)) else: vhat_t = None v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power)) r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) / (sma_inf - 2.0) * sma_inf / sma_t) sma_threshold = self._get_hyper('sma_threshold', var_dtype) var_t = tf.where(sma_t >= sma_threshold, r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t) if self._initial_weight_decay > 0.0: var_t += self._get_hyper('weight_decay', var_dtype) * var var_update = var.assign_sub( lr_t * var_t, use_locking=self._use_locking) updates = [var_update, m_t, v_t] if self.amsgrad: updates.append(vhat_t) return tf.group(*updates) def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr(var_dtype) beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype) local_step = tf.cast(self.iterations + 1, var_dtype) beta_1_power = tf.pow(beta_1_t, local_step) beta_2_power = tf.pow(beta_2_t, local_step) if self._initial_total_steps > 0: total_steps = self._get_hyper('total_steps', var_dtype) warmup_steps = total_steps *\ self._get_hyper('warmup_proportion', var_dtype) min_lr = self._get_hyper('min_lr', var_dtype) decay_steps = tf.maximum(total_steps - warmup_steps, 1) decay_rate = (min_lr - lr_t) / decay_steps lr_t = tf.where( local_step <= warmup_steps, lr_t * (local_step / warmup_steps), lr_t + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps), ) sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0 sma_t = sma_inf - 2.0 * local_step * beta_2_power / ( 1.0 - beta_2_power) m = self.get_slot(var, 'm') m_scaled_g_values = grad * (1 - beta_1_t) m_t = m.assign(m * beta_1_t, use_locking=self._use_locking) with tf.control_dependencies([m_t]): m_t = self._resource_scatter_add(m, indices, m_scaled_g_values) m_corr_t = m_t / (1.0 - beta_1_power) v = self.get_slot(var, 'v') v_scaled_g_values = (grad * grad) * (1 - beta_2_t) v_t = v.assign(v * beta_2_t, use_locking=self._use_locking) with tf.control_dependencies([v_t]): v_t = self._resource_scatter_add(v, indices, v_scaled_g_values) if self.amsgrad: vhat = self.get_slot(var, 'vhat') vhat_t = vhat.assign( tf.maximum(vhat, v_t), use_locking=self._use_locking) v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power)) else: vhat_t = None v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power)) r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) / (sma_inf - 2.0) * sma_inf / sma_t) sma_threshold = self._get_hyper('sma_threshold', var_dtype) var_t = tf.where(sma_t >= sma_threshold, r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t) if self._initial_weight_decay > 0.0: var_t += self._get_hyper('weight_decay', var_dtype) * var with tf.control_dependencies([var_t]): var_update = self._resource_scatter_add( var, indices, tf.gather(-lr_t * var_t, indices)) updates = [var_update, m_t, v_t] if self.amsgrad: updates.append(vhat_t) return tf.group(*updates) def get_config(self): 
config = super(RectifiedAdam, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'decay': self._serialize_hyperparameter('decay'), 'weight_decay': self._serialize_hyperparameter('weight_decay'), 'sma_threshold': self._serialize_hyperparameter('sma_threshold'), 'epsilon': self.epsilon, 'amsgrad': self.amsgrad, 'total_steps': self._serialize_hyperparameter('total_steps'), 'warmup_proportion': self._serialize_hyperparameter('warmup_proportion'), 'min_lr': self._serialize_hyperparameter('min_lr'), }) return config
17,416
40.568019
92
py
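A hypothetical sketch of plugging the warmup-enabled schedule above into a Keras optimizer; every numeric value below is a placeholder rather than a setting taken from the repo.

# Hypothetical configuration; the step counts and rates are placeholders.
import tensorflow as tf
from tf2_models.train_utils import ExponentialDecayWithWarmpUp

lr_schedule = ExponentialDecayWithWarmpUp(
    initial_learning_rate=1e-3,
    decay_steps=10000,
    decay_rate=0.96,
    warmup_steps=1000,
    warmup_learning_rate=1e-5,
    hold_base_rate_steps=0)

# Linear ramp from warmup_learning_rate to the base rate during warmup,
# an optional hold at the base rate, then exponential decay.
for step in [0, 500, 1000, 5000, 20000]:
  print(step, float(lr_schedule(step)))

optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-8, clipnorm=1.0)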
Reflect
Reflect-master/tf2_models/transformer_layers.py
import tensorflow as tf from tf2_models.common_layers import get_initializer, shape_list, gelu class Attention(tf.keras.layers.Layer): def __init__(self, hidden_dim, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs): super(Attention, self).__init__(**kwargs) self.output_attentions = config.output_attentions self.casual_masking = casual_masking n_state = hidden_dim assert n_state % config.n_head == 0 self.n_ctx = n_ctx self.n_head = config.n_head self.split_size = n_state self.scale = scale self.regularizer = regularizer self.c_attn = Conv1D(nf=n_state * 3, nx=hidden_dim, initializer_range=config.initializer_range, regularizer=self.regularizer, name='c_attn') self.c_proj = Conv1D(nf=n_state, nx=hidden_dim, initializer_range=config.initializer_range, regularizer=self.regularizer, name='c_proj') self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) @staticmethod def causal_attention_mask(nd, ns, dtype): """1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. """ i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, inputs, training=False): q, k, v, attention_mask = inputs # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. _, _, nd, ns = shape_list(w) if self.casual_masking: b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask w = w + attention_mask w = tf.nn.softmax(w, axis=-1) w = self.attn_dropout(w, training=training) outputs = [tf.matmul(w, v)] if self.output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, inputs, training=False): x, layer_past, attention_mask = inputs x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=1) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) present = tf.stack([key, value], axis=1) attn_outputs = self._attn([query, key, value, attention_mask], training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) class Conv1D(tf.keras.layers.Layer): def __init__(self, nf, nx, regularizer, initializer_range=0.02, **kwargs): """ TFConv1D layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2) Basically works like a Linear layer but the weights are transposed """ super(Conv1D, self).__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range self.regularizer = regularizer def build(self, input_shape): self.weight = self.add_weight( "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range), regularizer=self.regularizer) self.bias = self.add_weight( "bias", shape=[1, self.nf], initializer=tf.zeros_initializer(), regularizer=self.regularizer) def call(self, x, **kwargs): bz, sl = shape_list(x)[:2] x = tf.reshape(x, [-1, self.nx]) x = tf.matmul(x, self.weight) + self.bias x = tf.reshape(x, [bz, sl, self.nf]) return x class Block(tf.keras.layers.Layer): def __init__(self, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs): super(Block, self).__init__(**kwargs) self.regularizer = regularizer nx = config.embedding_dim self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.attn = Attention(hidden_dim=nx, n_ctx=n_ctx, config=config, scale=scale, regularizer=self.regularizer, casual_masking=casual_masking, name='attn') self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2') self.mlp = TransformerMLP(4 * nx, config, regularizer=self.regularizer, name='mlp') def call(self, inputs, training=False): x, layer_past, attention_mask = inputs a = self.ln_1(x) output_attn = self.attn([a, layer_past, attention_mask], training=training) a = output_attn[0] # output_attn: a, present, (attentions) x = x + a m = self.ln_2(x) m = self.mlp(m, training=training) x = x + m outputs = [x] + output_attn[1:] return outputs # x, present, (attentions) class TransformerMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, regularizer, **kwargs): super(TransformerMLP, self).__init__(**kwargs) self.regularizer = regularizer nx = config.embedding_dim self.c_fc = Conv1D(n_state, nx, initializer_range=config.initializer_range, regularizer=self.regularizer, name='c_fc') self.c_proj = Conv1D(nx, n_state, initializer_range=config.initializer_range, regularizer=self.regularizer, name='c_proj') self.act = gelu self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2
6,560
35.049451
105
py
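A hypothetical single-Block forward pass for the transformer layers above; the config namespace is an assumption that only sets the attributes the layers actually read, embedding_dim must be divisible by n_head, and regularizer=None simply disables weight regularization. Passing None for layer_past and attention_mask mirrors how the Bert model in this repo drives its blocks.

# Hypothetical usage of Block; the config values are assumptions for illustration.
import tensorflow as tf
from types import SimpleNamespace
from tf2_models.transformer_layers import Block

config = SimpleNamespace(
    embedding_dim=64, n_head=4,
    initializer_range=0.02, attn_pdrop=0.1, resid_pdrop=0.1,
    layer_norm_epsilon=1e-5, output_attentions=False)

block = Block(n_ctx=32, config=config, regularizer=None, scale=True)

x = tf.random.normal((2, 16, 64))                           # (batch, sequence, embedding_dim)
hidden, present = block([x, None, None], training=False)    # inputs are [x, layer_past, attention_mask]
print(hidden.shape)                                         # (2, 16, 64)
print(present.shape)                                        # stacked (key, value) cache: (2, 2, 4, 16, 16)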
Reflect
Reflect-master/tf2_models/ff_resnet.py
import tensorflow as tf class FFResnetBlock(tf.keras.layers.Layer): def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs): super(FFResnetBlock, self).__init__(*inputs, **kwargs) self.filters = filters self.kernel_size = kernel_size self.activation = activation self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.000000002) self.create_layer() def create_layer(self): self.conv1 = tf.keras.layers.Dense(self.filters*9, activation=self.activation, kernel_regularizer=self.regularizer) self.batch_norm1 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Dense(self.filters*9, activation=None, kernel_regularizer=self.regularizer) self.batch_norm2 = tf.keras.layers.BatchNormalization() self.add = tf.keras.layers.Add() self.activation = tf.keras.layers.Activation('relu') def call(self, inputs, training=None, **kwargs): outputs = self.conv1(inputs, training=training, **kwargs) outputs = self.batch_norm1(outputs,training=training, **kwargs) outputs = self.conv2(outputs, training=training, **kwargs) outputs = self.batch_norm2(outputs,training=training, **kwargs) outputs = self.add([outputs, inputs],training=training, **kwargs) outputs = self.activation(outputs, training=training, **kwargs) return outputs class FFResnet(tf.keras.Model): def __init__(self, hparams, scope='ff_resnet', *inputs, **kwargs): if 'cl_token' in kwargs: del kwargs['cl_token'] super(FFResnet, self).__init__(name=scope, *inputs, **kwargs) self.scope = scope self.hparams = hparams self.model_name = '_'.join([self.scope, 'h-' + str(self.hparams.hidden_dim), 'rd-' + str(self.hparams.num_res_net_blocks), 'hdrop-' + str(self.hparams.hidden_dropout_rate), 'indrop-' + str(self.hparams.input_dropout_rate)]) self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00, l2=0.000000002) self.create_layers() self.rep_index = 1 self.rep_layer = -1 def create_layers(self): self.flat = tf.keras.layers.Flatten() self.batch_norm1 = tf.keras.layers.BatchNormalization() self.activation = tf.keras.layers.Activation('relu') self.conv1 = tf.keras.layers.Dense(self.hparams.filters[0]*9, activation=None, kernel_regularizer=self.regularizer) self.batch_norm2 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Dense(self.hparams.filters[1]*9, activation=None, kernel_regularizer=self.regularizer) self.batch_norm3 = tf.keras.layers.BatchNormalization() self.resblocks = [] for i in range(self.hparams.num_res_net_blocks): self.resblocks.append(FFResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2])) self.conv4 = tf.keras.layers.Dense(self.hparams.filters[3]*9, activation=None) self.batch_norm4 = tf.keras.layers.BatchNormalization() self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu') self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate) self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None) def call(self, inputs, padding_symbol=None, training=None, **kwargs): x = self.flat(inputs, **kwargs) x = self.batch_norm1(x, training=training, **kwargs) x = self.conv1(x, training=training, **kwargs) x = self.batch_norm2(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) x = self.conv2(x, training=training, **kwargs) x = self.batch_norm3(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) for i in range(self.hparams.num_res_net_blocks): x = self.resblocks[i](x, training=training, **kwargs) x = self.dropout(x, training=training, 
**kwargs) x = self.conv4(x, training=training, **kwargs) x = self.batch_norm4(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) x = self.dense(x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) outputs = self.project(x, training=training, **kwargs) return outputs def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs): self.layer_activations = [] x = self.flat(inputs, **kwargs) x = self.batch_norm1(x, training=training, **kwargs) x = self.conv1(x, training=training, **kwargs) x = self.batch_norm2(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.conv2(x, training=training, **kwargs) x = self.batch_norm3(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) for i in range(self.hparams.num_res_net_blocks): x = self.resblocks[i](x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.conv4(x, training=training, **kwargs) x = self.batch_norm4(x, training=training, **kwargs) x = self.activation(x) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) x = self.dense(x, training=training, **kwargs) x = self.dropout(x, training=training, **kwargs) self.layer_activations.append(x) pnltimt = x outputs = self.project(x, training=training, **kwargs) return outputs, pnltimt, self.layer_activations
6,118
39.256579
96
py
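The fully connected FFResnet above follows the same calling convention; a compact, hypothetical sketch (hparams is again an assumed namespace, and filters[1] must equal filters[2] for the residual add inside FFResnetBlock).

# Hypothetical usage of FFResnet; all hparams values are assumptions.
import tensorflow as tf
from types import SimpleNamespace
from tf2_models.ff_resnet import FFResnet

hparams = SimpleNamespace(
    hidden_dim=128, output_dim=10, num_res_net_blocks=2,
    hidden_dropout_rate=0.1, input_dropout_rate=0.0,
    filters=[32, 64, 64, 64], kernel_size=[3, 3, 3, 3])

model = FFResnet(hparams)
logits = model(tf.random.uniform((4, 28, 28, 1)), training=False)   # input is flattened internally -> (4, 10)
print(model.model_name, logits.shape)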
Reflect
Reflect-master/tf2_models/keras_callbacks.py
import tensorflow as tf

from tf2_models.utils import log_summary


class CheckpointCallback(tf.keras.callbacks.Callback):

  def __init__(self, manager, ckpt):
    super(CheckpointCallback, self).__init__()
    self.manager = manager
    self.ckpt = ckpt

  def on_epoch_end(self, epoch, logs=None):
    self.ckpt.step.assign_add(1)
    save_path = self.manager.save()
    tf.print("Epoch %d: " % epoch)
    tf.print("Saved checkpoint for:", save_path)


class SummaryCallback(tf.keras.callbacks.Callback):

  def __init__(self, summary_writer):
    super(SummaryCallback, self).__init__()
    self.summary_writer = summary_writer

  def on_train_batch_end(self, batch, logs=None):
    # Only log every 200 optimizer steps.
    if (self.model.optimizer.iterations % 200) == 0:
      print(logs)
      if 'loss' in logs.keys():
        log_summary(log_name='learning_rate',
                    log_value=self.model.optimizer.learning_rate(self.model.optimizer.iterations),
                    summary_scope='train')
        log_summary(log_name='fine_total_loss', log_value=logs['loss'], summary_scope='train')
      if 'masked_sequence_loss' in logs.keys():
        log_summary(log_name='fine_lm_loss', log_value=logs['masked_sequence_loss'], summary_scope='train')
      if 'sequence_loss' in logs.keys():
        log_summary(log_name='fine_lm_loss', log_value=logs['sequence_loss'], summary_scope='train')

  def on_epoch_end(self, epoch, logs=None):
    # Log summaries for train and validation at the end of each epoch.
    if 'masked_sequence_loss' in logs.keys():
      log_summary(log_name='perplexity', log_value=tf.exp(logs['masked_sequence_loss']), summary_scope='train')
      log_summary(log_name='perplexity', log_value=tf.exp(logs['val_masked_sequence_loss']), summary_scope='valid')
    for key in logs.keys():
      if 'val' in key:
        log_summary(log_name=key, log_value=logs[key], summary_scope='valid')
      else:
        log_summary(log_name=key, log_value=logs[key], summary_scope='train')
1,859
38.574468
148
py
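A hypothetical end-to-end wiring of the two callbacks above, mirroring how trainer.py constructs them; the toy model, data, and paths are placeholders, and a learning-rate schedule is used so that SummaryCallback's learning-rate logging remains a callable.

# Hypothetical wiring of the callbacks; model, data and paths are placeholders.
import tensorflow as tf
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, decay_steps=1000, decay_rate=0.96)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=optimizer, loss='mse')

ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, './ckpts', max_to_keep=2)
writer = tf.summary.create_file_writer('./logs/train')

x = tf.random.normal((32, 4))
y = tf.random.normal((32, 2))
model.fit(x, y, epochs=2, batch_size=8,
          callbacks=[CheckpointCallback(manager=manager, ckpt=ckpt),
                     SummaryCallback(summary_writer=writer)])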
Reflect
Reflect-master/tf2_models/metrics.py
import tensorflow as tf @tf.function(experimental_relax_shapes=True) def distill_loss(y_true, y_pred, tmp): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32) scale_factor = 1.0 / (tmp*tmp) return tf.reduce_mean(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp, labels=y_true, name='loss')) * scale_factor @tf.function(experimental_relax_shapes=True) def sequence_distill_loss(y_true, y_pred, padding_symbol, tmp): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32) sequence_mask = tf.cast(y_true[..., padding_symbol] != 1.0, dtype=tf.float32) sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask) scale_factor = 1.0 / (tmp * tmp) return tf.reduce_sum(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp, labels=y_true, name='loss') * sequence_mask) * scale_factor @tf.function(experimental_relax_shapes=True) def masked_sequence_loss(y_true, y_pred, padding_symbol=0): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64) sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32) # [batch_size, 1] sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None] return tf.reduce_mean(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, name='loss') * sequence_mask, axis=-1)) @tf.function(experimental_relax_shapes=True) def batch_masked_sequence_loss(y_true, y_pred, padding_symbol=0): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64) sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32) # [batch_size, 1] sequence_mask = sequence_mask return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, name='loss'), sequence_mask @tf.function(experimental_relax_shapes=True) def masked_perplexity(y_true, y_pred, padding_symbol=0): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64) sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32) # [batch_size, 1] sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None] return tf.reduce_mean(tf.exp(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, name='loss') * sequence_mask, axis=-1))) @tf.function(experimental_relax_shapes=True) def masked_batch_perplexity(y_true, y_pred, padding_symbol=0): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64) sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32) # [batch_size, 1] sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask) return tf.exp(tf.reduce_sum(sequence_mask * tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, name='loss'))) #@tf.function(experimental_relax_shapes=True) def classification_loss(y_true, y_pred): if len(y_true.shape) > 1: y_true = tf.squeeze(y_true, axis=-1) y_true = tf.cast(y_true, dtype=tf.int64) return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, name='loss') @tf.function(experimental_relax_shapes=True) def accuracy(targets, logits, padding_symbol=0): targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32) return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(1)) @tf.function(experimental_relax_shapes=True) def unmasked_accuracy(targets, logits, ): targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) return unmasked_accuracy_topk(targets, logits, topk=tf.constant(1)) @tf.function(experimental_relax_shapes=True) def accuracy_top2(targets, logits, padding_symbol=0): 
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32) return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(2)) @tf.function(experimental_relax_shapes=True) def unmasked_accuracy_top2(targets, logits, ): targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) return unmasked_accuracy_topk(targets, logits, topk=tf.constant(2)) @tf.function(experimental_relax_shapes=True) def accuracy_top5(targets, logits, padding_symbol=0): targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32) return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(5)) @tf.function(experimental_relax_shapes=True) def unmasked_accuracy_top5(targets, logits, ): targets = tf.cast(tf.squeeze(targets), dtype=tf.int64) return unmasked_accuracy_topk(targets, logits, topk=tf.constant(5)) @tf.function(experimental_relax_shapes=True) def accuracy_topk(targets, logits, sequence_mask, topk): orig_shape = tf.shape(logits) last_dim = orig_shape[-1] logits = tf.reshape(logits, (-1,last_dim)) targets = tf.reshape(targets, (-1,1)) sequence_mask = tf.cast(tf.reshape(sequence_mask, (-1,1)), tf.float32) unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets, y_pred=logits, k=topk) normalizing_factor = sequence_mask / tf.reduce_sum(sequence_mask) normalizing_factor = tf.squeeze(normalizing_factor) return tf.reduce_sum(tf.multiply(normalizing_factor, unmasked_accuracies)) @tf.function(experimental_relax_shapes=True) def unmasked_accuracy_topk(targets, logits, topk): orig_shape = tf.shape(logits) last_dim = orig_shape[-1] logits = tf.reshape(logits, (-1,last_dim)) targets = tf.reshape(targets, (-1,1)) unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets, y_pred=logits, k=topk) return tf.reduce_mean(unmasked_accuracies) class MaskedSequenceLoss(tf.keras.losses.Loss): def __init__(self, padding_symbol=0, num_replicas_in_sync=1, **kwargs): super(MaskedSequenceLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs) self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64) self.name = "batch_masked_sequence_loss" self.num_replicas_in_sync = num_replicas_in_sync def call(self, y_true, y_pred, sample_weight=None): entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol) if sample_weight is not None: mask = sample_weight norm_factor = mask / tf.reduce_sum(mask) return tf.reduce_sum(entropies * norm_factor) / self.num_replicas_in_sync class MaskedSequenceMetric(tf.keras.losses.Loss): def __init__(self, padding_symbol=0, **kwargs): super(MaskedSequenceMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs) self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64) self.name = "batch_masked_sequence_loss" def call(self, y_true, y_pred, sample_weight=None): entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol) if sample_weight is not None: mask = sample_weight norm_factor = mask / tf.reduce_sum(mask) return tf.reduce_sum(entropies * norm_factor) class ClassificationLoss(tf.keras.losses.Loss): def __init__(self, global_batch_size, padding_symbol=tf.constant(0), **kwargs): super(ClassificationLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs) self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64) self.name = "classification_loss" 
self.global_batch_size = tf.cast(global_batch_size, dtype=tf.float32) def call(self, y_true, y_pred): return classification_loss(y_true=y_true, y_pred=y_pred) / self.global_batch_size class ClassificationLossMetric(tf.keras.losses.Loss): def __init__(self, global_batch_size, padding_symbol=0, **kwargs): super(ClassificationLossMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs) self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64) self.name = "classification_loss" self.global_batch_size = global_batch_size def call(self, y_true, y_pred): return tf.reduce_mean(classification_loss(y_true=y_true, y_pred=y_pred), axis=0) class AccuracyTopk(tf.keras.losses.Loss): def __init__(self, global_batch_size, padding_symbol=0, topk=1, **kwargs): super(AccuracyTopk, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs) self.name = '-'.join(['accuracy','top', str(topk)]) self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64) self.global_batch_size = global_batch_size self.topk = tf.constant(topk) def call(self, y_true, y_pred): y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64) sequence_mask = tf.cast(y_true != self.padding_symbol, dtype=tf.float32) return accuracy_topk(targets=y_true, logits=y_pred, sequence_mask=sequence_mask, topk=self.topk) if __name__ == '__main__': import numpy as np a = np.asarray([[[1,1.5,2,0], [4,3,0,0]], [[1,1.5,2,0], [4,3,0,0]]], dtype=np.float32) a_mask = [[1, 1],[1 , 0]] print(a_mask) b = np.asarray([[0, 0],[1, 1]], dtype=np.int64) print(accuracy_topk(logits=a,targets=b,sequence_mask=a_mask,topk=1))
10,276
46.578704
117
py
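A toy numeric check of the padding-aware loss and accuracy helpers above; the targets and logits are made up, and 0 plays the role of the padding id.

# Toy check of the masked sequence loss/metrics; values are made up, 0 is the padding id.
import tensorflow as tf
from tf2_models.metrics import masked_sequence_loss, MaskedSequenceLoss, accuracy

targets = tf.constant([[5, 2, 0],
                       [1, 0, 0]], dtype=tf.int64)   # zero-padded targets, shape (batch, seq)
logits = tf.random.normal((2, 3, 8))                 # (batch, seq, vocab)

print(float(masked_sequence_loss(targets, logits)))             # mean per-example masked cross-entropy
print(float(MaskedSequenceLoss(padding_symbol=0)(targets, logits)))
print(float(accuracy(targets, logits)))                         # top-1 accuracy over non-padding tokens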
Reflect
Reflect-master/tf2_models/trainer.py
import tensorflow as tf import os from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam, 'radam': RectifiedAdam, } class Trainer(object): def __init__(self, hparams, strategy, model, task, train_params, log_dir, ckpt_dir): self.hparams = hparams self.model = model self.task = task self.train_params = train_params self.strategy = strategy lr_schedule = self.get_lr_schedule() self.optimizer = OPTIMIZER_DIC[self.train_params.optimizer](learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0) self.ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=self.optimizer, net=self.model) self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir, keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours, max_to_keep=2) with self.strategy.scope(): x, y = iter(self.task.valid_dataset).next() model(x) model.summary() model.compile( optimizer=self.optimizer, loss=self.task.get_loss_fn(), metrics=self.task.metrics())#[self.task.get_loss_fn()]) #tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),) summary_dir = os.path.join(log_dir, 'summaries') tf.io.gfile.makedirs(log_dir) self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(summary_dir, 'train')) tf.compat.v2.summary.experimental.set_step(self.optimizer.iterations) ckpt_callback = CheckpointCallback(manager=self.manager, ckpt=self.ckpt) summary_callback = SummaryCallback(summary_writer=self.summary_writer) self.callbacks = [ckpt_callback, summary_callback] def get_lr_schedule(self): if 'crs' in self.train_params.schedule: initial_learning_rate = self.train_params.learning_rate lr_schedule = ( tf.keras.experimental.CosineDecayRestarts( initial_learning_rate, self.train_params.decay_steps, t_mul=2.0, m_mul=0.9, alpha=0.001, )) elif self.train_params.optimizer == 'radam': initial_learning_rate = self.train_params.learning_rate lr_schedule = ExponentialDecayWithWarmpUp( initial_learning_rate=initial_learning_rate, decay_steps=self.train_params.decay_steps, hold_base_rate_steps=self.train_params.hold_base_rate_steps, decay_rate=0.96, warmup_steps=0.0) else: initial_learning_rate = self.train_params.learning_rate lr_schedule = ExponentialDecayWithWarmpUp( initial_learning_rate=initial_learning_rate, decay_steps=self.train_params.decay_steps, decay_rate=0.96, hold_base_rate_steps=self.train_params.hold_base_rate_steps, warmup_steps=self.train_params.warmup_steps) return lr_schedule def restore(self): with self.strategy.scope(): self.ckpt.restore(self.manager.latest_checkpoint) if self.manager.latest_checkpoint: print("Restored from {}".format(self.manager.latest_checkpoint)) else: print("Initializing from scratch.") def train(self): with self.strategy.scope(): with self.summary_writer.as_default(): print("initial learning rate:", self.model.optimizer.learning_rate(self.model.optimizer.iterations)) self.model.fit(self.task.train_dataset, epochs=self.train_params.num_train_epochs, steps_per_epoch=self.task.n_train_batches, validation_steps=self.task.n_valid_batches, callbacks=self.callbacks, validation_data=self.task.valid_dataset, verbose=2 )
3,931
39.536082
122
py
Reflect
Reflect-master/tfds_data/tal_agreement.py
from collections import Counter import tensorflow as tf import tensorflow_datasets as tfds import os import numpy as np from tensorflow_datasets.core.features.text import Tokenizer from tensorflow_datasets.core.features.text.text_encoder import write_lines_to_file, read_lines_from_file from prep_data.build_dictionary import build_and_save_dic from util import text_util, constants from util.text_util import deps_from_tsv, deps_to_tsv import string class SVAgreement(tfds.core.GeneratorBasedBuilder): """ This is the dataset for evaluating the ability of language models to learn syntax. Paper: Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies Tal Linzen, Emmanuel Dupoux, Yoav Goldberg """ VERSION = tfds.core.Version('0.1.0') CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1} CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()} def __init__(self, **kwargs): super(SVAgreement, self).__init__(**kwargs) def _info(self): self.text_encoder_config = tfds.features.text.TextEncoderConfig( encoder_cls=tfds.features.text.SubwordTextEncoder, vocab_size=2 ** 13) return tfds.core.DatasetInfo( builder=self, # This is the description that will appear on the datasets page. description=("This is the dataset for subject verb agreement " "to assess the ability of language models to learn syntax"), # tfds.features.FeatureConnectors features=tfds.features.FeaturesDict({ "sentence": tfds.features.Text( encoder_config=self.text_encoder_config), # Here, labels can be of 5 distinct values. "verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]), "verb_position": tf.int32, "n_intervening": tf.int32, "n_diff_intervening": tf.int32, "distance": tf.int32, "verb": tfds.features.Text() }), # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. 
supervised_keys=("sentence", "verb_class"), # Homepage of the dataset for documentation urls=["https://github.com/TalLinzen/rnn_agreement"], # Bibtex citation for the dataset citation=r"""@article{my-awesome-dataset-2020, author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""", ) def _vocab_text_gen(self, input_file): for _, ex in self._generate_examples(input_file): yield ex["sentence"] def _split_generators(self, dl_manager): # Downloads the data and defines the splits # dl_manager is a tfds.download.DownloadManager that can be used to # download and extract URLs extracted_path = dl_manager.download_and_extract( 'http://tallinzen.net/media/rnn_agreement/agr_50_mostcommon_10K.tsv.gz') def make_splits(extracted_path, data_dir, prop_train=0.1, prop_valid=0.01): # for reproducibility np.random.seed(42) print('| read in the data') data = deps_from_tsv(extracted_path) print('| shuffling') np.random.shuffle(data) n_train = int(len(data) * prop_train) n_valid = int(len(data) * prop_valid) train = data[:n_train] valid = data[n_train: n_train + n_valid] test = data[n_train + n_valid:] print('| splitting') deps_to_tsv(train, os.path.join(data_dir, "train.tsv")) deps_to_tsv(valid, os.path.join(data_dir, "valid.tsv")) deps_to_tsv(test, os.path.join(data_dir, "test.tsv")) print('| done!') make_splits(extracted_path,self.data_dir) # Generate vocabulary from training data if SubwordTextEncoder configured self.info.features["sentence"].maybe_build_from_corpus( self._vocab_text_gen(os.path.join(self.data_dir, "train.tsv"))) # Specify the splits return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, gen_kwargs={ "input_file_path": os.path.join(self.data_dir, "train.tsv"), }, ), tfds.core.SplitGenerator( name=tfds.Split.VALIDATION, gen_kwargs={ "input_file_path": os.path.join(self.data_dir, "valid.tsv"), }, ), tfds.core.SplitGenerator( name=tfds.Split.TEST, gen_kwargs={ "input_file_path": os.path.join(self._data_dir, "test.tsv"), }, ), ] def _generate_examples(self, input_file_path): """ Yields examples from the dataset :param input_file_path: :return: example """ # Read the input data out of the source files data = deps_from_tsv(input_file_path) # And yield examples as feature dictionaries example_id = 0 for example in data: example_id += 1 yield example_id, { "sentence": example['sentence'], "verb_class": example['verb_pos'], "verb_position": int(example['verb_index']) - 1, "n_intervening": example['n_intervening'], "n_diff_intervening": example['n_diff_intervening'], "distance": example['distance'], "verb": example['verb'] } def sentence_encoder(self): return self.info.features["sentence"].encoder def vocab_size(self): """Retrieves the dictionary mapping word indices back to words. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). Returns: The word index dictionary. """ return self.info.features["sentence"].encoder.vocab_size class WordSvAgreement(SVAgreement): """ This is the dataset for evaluating the ability of language models to learn syntax. 
Paper: Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies Tal Linzen, Emmanuel Dupoux, Yoav Goldberg """ VERSION = tfds.core.Version('0.1.0') CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1} CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()} VOCAB_DIR = 'tal_agreement/vocab' def __init__(self, data_dir, **kwargs): self.vocab_dir = os.path.join(data_dir, self.VOCAB_DIR) super(WordSvAgreement, self).__init__(data_dir=data_dir, **kwargs) def _info(self): vocab = list(np.load(self.vocab_dir, allow_pickle=True).item().keys()) print("Vocab len: ", len(vocab)) self.text_encoder_config = tfds.features.text.TextEncoderConfig( encoder=tfds.features.text.TokenTextEncoder(vocab_list=vocab, oov_token=constants.unk, lowercase=False, tokenizer=tfds.features.text.Tokenizer( alphanum_only=True, reserved_tokens=[a for a in string.punctuation if a not in ['<', '>']] + constants.all ))) return tfds.core.DatasetInfo( builder=self, # This is the description that will appear on the datasets page. description=("This is the dataset for subject verb agreement " "to assess the ability of language models to learn syntax"), # tfds.features.FeatureConnectors features=tfds.features.FeaturesDict({ "sentence": tfds.features.Text( encoder_config=self.text_encoder_config), # Here, labels can be of 5 distinct values. "verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]), "verb_position": tf.int32, "n_intervening": tf.int32, "n_diff_intervening": tf.int32, "distance": tf.int32, "verb": tfds.features.Text() }), # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. supervised_keys=("sentence", "verb_class"), # Homepage of the dataset for documentation homepage="https://github.com/TalLinzen/rnn_agreement", # Bibtex citation for the dataset citation=r"""@article{my-awesome-dataset-2020, author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""", ) if __name__ == '__main__': databuilder = WordSvAgreement(data_dir='data') databuilder.download_and_prepare(download_dir='tmp/', download_config=tfds.download.DownloadConfig(register_checksums=True)) dataset = databuilder.as_dataset(split="validation", batch_size=1000) dataset = tfds.as_numpy(dataset) for batch in dataset: print("encoded_sentence:", batch['sentence']) print("decoded_sentence:", databuilder.sentence_encoder().decode(batch['sentence'][0])) print("verb class:", batch['verb_class'][0]) print("verb position:",batch['verb_position'][0]) print("distance:",batch['distance'][0]) break print(databuilder.vocab_size())
8,680
35.020747
106
py
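A quick sanity check on the split sizes produced by make_splits in the builder above: with prop_train=0.1 and prop_valid=0.01, only 10% of the shuffled dependency records go to training, 1% to validation, and the remaining 89% form the (largest) test split. The corpus size below is a made-up placeholder, not the real size of the agreement corpus.

n_examples = 1_000_000                      # placeholder corpus size, for illustration only
prop_train, prop_valid = 0.1, 0.01          # the values hard-coded in make_splits
n_train = int(n_examples * prop_train)      # 100000
n_valid = int(n_examples * prop_valid)      # 10000
n_test = n_examples - n_train - n_valid     # 890000 -> the test split is by far the largest
print(n_train, n_valid, n_test)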
Reflect
Reflect-master/tasks/task.py
import tensorflow as tf from distill.distill_util import get_masked_probs from distill.repsim_util import rep_loss from util import constants class Task(object): def __init__(self, task_params, num_replicas_in_sync=1, builder_cls=None, name='abstract_task', data_dir='data', output_padding=False): self.name = name self.task_params = task_params self.data_dir = data_dir self.builder_cls = builder_cls self.num_replicas_in_sync = num_replicas_in_sync self.add_cls = True if builder_cls: self.databuilder = self.builder_cls(data_dir=self.data_dir) self.input_padding_symbol = tf.constant(0, dtype=tf.int64) #tf.cast(self.sentence_encoder().encode(constants.pad)[0], dtype=tf.int64) if output_padding: self.output_padding_symbol = tf.constant(0, dtype=tf.int64) #tf.cast(self.sentence_encoder().encode(constants.pad)[0], dtype=tf.int64) else: self.output_padding_symbol = tf.cast(-1, dtype=tf.int64) self.setup_datasets() def sentence_encoder(self): raise NotImplementedError @property def padded_shapes(self): return ([None],[None]) def vocab_size(self): raise NotImplementedError def convert_examples(self, examples): raise NotImplementedError def get_probs_fn(self): return get_masked_probs def setup_datasets(self): assert self.databuilder self.info = self.databuilder.info self.n_train_batches = int(self.info.splits['train'].num_examples / self.task_params.batch_size) self.n_valid_batches = int(self.info.splits['validation'].num_examples / self.task_params.batch_size) self.n_test_batches = int(self.info.splits['test'].num_examples / self.task_params.batch_size) self.valid_dataset = self.databuilder.as_dataset(split="validation") self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.valid_dataset = self.valid_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes, padding_values=(self.input_padding_symbol,self.output_padding_symbol)) #self.valid_dataset = self.valid_dataset.cache() self.valid_dataset = self.valid_dataset.repeat() self.valid_dataset = self.valid_dataset.prefetch(tf.data.experimental.AUTOTUNE) self.test_dataset = self.databuilder.as_dataset(split="test") self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.test_dataset = self.test_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes, padding_values=(self.input_padding_symbol,self.output_padding_symbol)) self.test_dataset = self.test_dataset.repeat() self.test_dataset = self.test_dataset.prefetch(tf.data.experimental.AUTOTUNE) self.train_dataset = self.databuilder.as_dataset(split="train") self.train_dataset = self.train_dataset.shuffle(10000) self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.train_dataset = self.train_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes, padding_values=(self.input_padding_symbol,self.output_padding_symbol)) #self.train_dataset = self.train_dataset.cache() self.train_dataset = self.train_dataset.repeat() self.train_dataset = self.train_dataset.prefetch(tf.data.experimental.AUTOTUNE) def get_rep_loss(self): return rep_loss class RandomGaussianTask(object): def __init__(self, task_params, builder_cls=None, name='random_gaussian_task', data_dir='data'): self.name = name self.output_padding_symbol = 0 self.task_params = 
task_params self.data_dir = data_dir self.builder_cls = builder_cls if builder_cls: self.databuilder = self.builder_cls(data_dir=self.data_dir) self.setup_datasets() @property def padded_shapes(self): return ([None],[None]) def vocab_size(self): raise NotImplementedError def convert_examples(self, examples): raise NotImplementedError def get_probs_fn(self): return get_masked_probs def setup_datasets(self): assert self.builder_cls self.info = self.databuilder.info self.n_train_batches = int(self.info.splits['train'].num_examples / self.task_params.batch_size) self.n_valid_batches = int(self.info.splits['validation'].num_examples / self.task_params.batch_size) self.n_test_batches = int(self.info.splits['test'].num_examples / self.task_params.batch_size) self.valid_dataset = self.databuilder.as_dataset(split="validation") self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.valid_dataset = self.valid_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes) #self.valid_dataset = self.valid_dataset.cache() self.valid_dataset = self.valid_dataset.repeat() self.valid_dataset = self.valid_dataset.prefetch(tf.data.experimental.AUTOTUNE) self.test_dataset = self.databuilder.as_dataset(split="test") self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.test_dataset = self.test_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes) self.test_dataset = self.test_dataset.repeat() self.test_dataset = self.test_dataset.prefetch(tf.data.experimental.AUTOTUNE) self.train_dataset = self.databuilder.as_dataset(split="train") self.train_dataset = self.train_dataset.shuffle(10000) self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.train_dataset = self.train_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes) #self.train_dataset = self.train_dataset.cache() self.train_dataset = self.train_dataset.repeat() self.train_dataset = self.train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
6,704
47.586957
142
py
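The datasets built by Task.setup_datasets above all follow the same tf.data recipe: map, padded_batch with separate padding values for inputs (0) and targets (-1, so the loss can mask padded positions), repeat, prefetch. A minimal, self-contained sketch of that padding step, with toy token ids invented for the example:

import numpy as np
import tensorflow as tf

def pairs():
    # Variable-length (input, target) id sequences, invented for the example.
    for x, y in [([5, 6, 7], [6, 7, 8]), ([9, 3], [3, 4]), ([2], [7])]:
        yield np.array(x, dtype=np.int64), np.array(y, dtype=np.int64)

dataset = tf.data.Dataset.from_generator(
    pairs,
    output_signature=(tf.TensorSpec([None], tf.int64),
                      tf.TensorSpec([None], tf.int64)))

dataset = dataset.padded_batch(
    batch_size=2,
    padded_shapes=([None], [None]),
    padding_values=(tf.constant(0, dtype=tf.int64),     # input padding symbol
                    tf.constant(-1, dtype=tf.int64)))   # output padding symbol, masked by the loss

for x, y in dataset:
    print(x.numpy(), y.numpy())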
Reflect
Reflect-master/tasks/sv_agreement.py
import functools from distill.distill_util import DistillLoss, get_probs, SequenceDistillLoss, get_topk_masked_probs, get_masked_probs from tasks.task import Task import tensorflow as tf from tf2_models import metrics from tf2_models.metrics import masked_batch_perplexity, masked_perplexity, \ MaskedSequenceLoss, ClassificationLoss from tfds_data.tal_agreement import WordSvAgreement, SVAgreement from util import constants class SvAgreementLM(Task): def __init__(self, task_params, name='sv_agreement_lm', data_dir='data', builder_cls=SVAgreement): super(SvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls, output_padding=True) @tf.function def convert_examples(self, examples): sentences = examples['sentence'] s_shape = tf.shape(sentences) #batch_size, length = s_shape[0], s_shape[1] bos = self.databuilder.sentence_encoder().encode(constants.bos) eos = self.databuilder.sentence_encoder().encode(constants.eos) sentence = tf.concat([bos, sentences, eos], axis=-1) return sentence[:-1],\ sentence[1:] def get_loss_fn(self): return MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), num_replicas_in_sync=self.task_params.num_replicas_in_sync) def vocab_size(self): return self.databuilder.vocab_size() def output_size(self): return self.vocab_size() def sentence_encoder(self): return self.databuilder.sentence_encoder() def get_distill_loss_fn(self, distill_params): return SequenceDistillLoss(tmp=distill_params.distill_temp, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)) def get_probs_fn(self): return get_masked_probs def metrics(self): return [MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)), functools.update_wrapper(functools.partial(masked_batch_perplexity, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)), masked_batch_perplexity), functools.update_wrapper(functools.partial(masked_perplexity, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)), masked_perplexity), metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=1), metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=2), metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=5) ] class WordSvAgreementLM(SvAgreementLM): def __init__(self, task_params, name='word_sv_agreement_lm', data_dir='data', builder_cls=WordSvAgreement): super(WordSvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls) class WordSvAgreementVP(Task): def __init__(self, task_params, name='word_sv_agreement_vp', data_dir='data', builder_cls=WordSvAgreement): super(WordSvAgreementVP, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls, output_padding=False) @property def padded_shapes(self): return ([None],[]) @tf.function def convert_examples(self, examples): sentences = examples['sentence'] #bos = self.databuilder.sentence_encoder().encode(constants.bos) eos = self.databuilder.sentence_encoder().encode(constants.eos) sentences = tf.concat([sentences, eos], axis=-1) verb_position = examples['verb_position'] # The verb it self is also masked mask = 
tf.cast(tf.sequence_mask(verb_position,maxlen=tf.shape(sentences)[0]), dtype=tf.int64) max_length = tf.reduce_max(verb_position + 1) last_index_mask = tf.eye(tf.shape(sentences)[0], dtype=tf.int64)[verb_position] last_index_mask = last_index_mask * eos[0] return (sentences * mask + last_index_mask)[:max_length], \ examples['verb_class'] def vocab_size(self): return self.databuilder.vocab_size() def output_size(self): return 2 def get_loss_fn(self): return ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)) def get_distill_loss_fn(self, distill_params): return DistillLoss(tmp=tf.constant(distill_params.distill_temp), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)) def get_probs_fn(self): return get_probs def metrics(self): return [ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)), tf.keras.metrics.SparseCategoricalAccuracy()] def sentence_encoder(self): return self.databuilder.sentence_encoder()
5,424
44.208333
163
py
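The least obvious step in WordSvAgreementVP.convert_examples above is the input masking: every token from the verb position onward is zeroed, the verb slot itself is overwritten with the eos id, and the sequence is cut right after it, so the classifier only sees the left context. A toy reconstruction with invented token ids (the eos id 99 is an assumption for the example):

import tensorflow as tf

sentence = tf.constant([12, 7, 41, 8, 23, 5], dtype=tf.int64)   # invented token ids
verb_position = tf.constant(3)                                  # the verb sits at index 3
eos = tf.constant([99], dtype=tf.int64)                         # invented eos id

length = tf.shape(sentence)[0]
mask = tf.cast(tf.sequence_mask(verb_position, maxlen=length), dtype=tf.int64)
last_index_mask = tf.eye(length, dtype=tf.int64)[verb_position] * eos[0]
inputs = (sentence * mask + last_index_mask)[:verb_position + 1]

print(inputs.numpy())   # [12  7 41 99]: left context kept, verb slot replaced by eos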
Reflect
Reflect-master/tasks/mnist.py
from distill.distill_util import DistillLoss, get_probs from tasks.task import Task import tensorflow as tf import tensorflow_datasets as tfds from tf2_models.metrics import ClassificationLoss from tfds_data.aff_nist import AffNist class Mnist(Task): def __init__(self, task_params, name='mnist', data_dir='mnist_data'): self.databuilder = tfds.builder("mnist") super(Mnist, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=None) def vocab_size(self): return 28*28 def output_size(self): return 10 def get_loss_fn(self): return ClassificationLoss(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(-1, dtype=tf.int64)) def get_distill_loss_fn(self, distill_params): return DistillLoss(tmp=distill_params.distill_temp) def get_probs_fn(self): return get_probs def metrics(self): return [ClassificationLoss(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(-1, dtype=tf.int64)), tf.keras.metrics.SparseCategoricalAccuracy()] @property def padded_shapes(self): # To make sure we are not using this! raise NotImplementedError def convert_examples(self, examples): return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32) def setup_datasets(self): self.info = self.databuilder.info self.n_train_batches = int( self.info.splits['train'].num_examples / self.task_params.batch_size) self.n_test_batches = int( self.info.splits['test'].num_examples / self.task_params.batch_size) self.n_valid_batches = int( self.info.splits['test'].num_examples / self.task_params.batch_size) self.databuilder.download_and_prepare(download_dir=self.data_dir) self.test_dataset = self.databuilder.as_dataset(split="test") assert isinstance(self.test_dataset, tf.data.Dataset) self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.test_dataset = self.test_dataset.repeat() self.test_dataset = self.test_dataset.batch( batch_size=self.task_params.batch_size) self.test_dataset = self.test_dataset.prefetch( tf.data.experimental.AUTOTUNE) self.train_dataset = self.databuilder.as_dataset(split="train") assert isinstance(self.train_dataset, tf.data.Dataset) self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.train_dataset = self.train_dataset.repeat() self.train_dataset = self.train_dataset.shuffle(1024) self.train_dataset = self.train_dataset.batch( batch_size=self.task_params.batch_size) # self.train_dataset = self.train_dataset.cache() self.train_dataset = self.train_dataset.prefetch( tf.data.experimental.AUTOTUNE) self.valid_dataset = self.databuilder.as_dataset(split="test") assert isinstance(self.valid_dataset, tf.data.Dataset) self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.valid_dataset = self.valid_dataset.repeat() self.valid_dataset = self.valid_dataset.batch( batch_size=self.task_params.batch_size) self.valid_dataset = self.valid_dataset.prefetch( tf.data.experimental.AUTOTUNE) class AffNistTask(Task): def __init__(self, task_params, name='aff_nist',data_dir='data', builder_cls=AffNist): super(AffNistTask, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls) def input_shape(self): """ To be used when calling model.build(input_shape) :return: #[batch_size, height, width, channels """ return [None, 32, 32, 1] 
def vocab_size(self): return 40*40 def output_size(self): return 10 def get_loss_fn(self): return ClassificationLoss(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(-1, dtype=tf.int64)) def get_distill_loss_fn(self, distill_params): return DistillLoss(tmp=distill_params.distill_temp) def get_probs_fn(self): return get_probs def metrics(self): return [ClassificationLoss(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(-1, dtype=tf.int64)), tf.keras.metrics.SparseCategoricalAccuracy()] @property def padded_shapes(self): # To make sure we are not using this! raise NotImplementedError def convert_examples(self, examples): return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32) def setup_datasets(self): self.info = self.databuilder.info self.n_train_batches = int( self.info.splits['train'].num_examples / self.task_params.batch_size) self.n_test_batches = int( self.info.splits['test'].num_examples / self.task_params.batch_size) self.n_valid_batches = int( self.info.splits['test'].num_examples / self.task_params.batch_size) self.test_dataset = self.databuilder.as_dataset(split="test") assert isinstance(self.test_dataset, tf.data.Dataset) self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.test_dataset = self.test_dataset.repeat() self.test_dataset = self.test_dataset.batch( batch_size=self.task_params.batch_size) self.test_dataset = self.test_dataset.prefetch( tf.data.experimental.AUTOTUNE) self.train_dataset = self.databuilder.as_dataset(split="train") assert isinstance(self.train_dataset, tf.data.Dataset) self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.train_dataset = self.train_dataset.repeat() self.train_dataset = self.train_dataset.shuffle(1024) self.train_dataset = self.train_dataset.batch( batch_size=self.task_params.batch_size) # self.train_dataset = self.train_dataset.cache() self.train_dataset = self.train_dataset.prefetch( tf.data.experimental.AUTOTUNE) self.valid_dataset = self.databuilder.as_dataset(split="test") assert isinstance(self.valid_dataset, tf.data.Dataset) self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE) self.valid_dataset = self.valid_dataset.repeat() self.valid_dataset = self.valid_dataset.batch( batch_size=self.task_params.batch_size) self.valid_dataset = self.valid_dataset.prefetch( tf.data.experimental.AUTOTUNE) class Svhn(Mnist): def __init__(self, task_params, name='svhn', data_dir='mnist_data'): self.databuilder = tfds.builder("svhn_cropped") super(Mnist, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=None) def vocab_size(self): return 32 * 32 def input_shape(self): """ To be used when calling model.build(input_shape) :return: #[batch_size, height, width, channels """ return [None, 32, 32, 1] class Mnist40(Mnist): def __init__(self, task_params, name='mnist40', data_dir='mnist_data'): self.databuilder = tfds.builder("mnist") super(Mnist, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=None) def vocab_size(self): return 40 * 40 def output_size(self): return 10 def input_shape(self): """ To be used when calling model.build(input_shape) :return: #[batch_size, height, width, channels """ return [None, 32, 32, 1] def 
convert_examples(self, examples): pad_length = int((40 - 28) / 2) return tf.pad(tf.cast(examples['image'], dtype=tf.float32) / 255, ([pad_length, pad_length], [pad_length, pad_length], [0, 0])), tf.cast( examples['label'], dtype=tf.int32)
8,663
37.678571
103
py
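Mnist40.convert_examples above centres a 28x28 MNIST digit in a 40x40 canvas; the padding is (40 - 28) / 2 = 6 pixels on each side of height and width, with the channel axis left alone. A small sketch with random pixel data standing in for a real image:

import tensorflow as tf

image = tf.random.uniform([28, 28, 1], maxval=256, dtype=tf.int32)   # stand-in for examples['image']
pad_length = int((40 - 28) / 2)                                       # 6 pixels per side
padded = tf.pad(tf.cast(image, tf.float32) / 255,
                ([pad_length, pad_length], [pad_length, pad_length], [0, 0]))
print(padded.shape)   # (40, 40, 1)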
Reflect
Reflect-master/tasks/evaluations/lm_sv_agreement_eval.py
''' Evaluate word based language models on the subject verb agreement task. Codes adapted from: Example Run: python tasks/evaluations/lm_sv_agreement_eval.py \ --exp_name=lisa_fd4 \ --model_name=lm_gpt2 \ --model_config=very_big_gpt_v10 \ --train_config=adam_slow \ --prefix=offline_pure_distill_2_teacher_lm_lstm_shared_emb_em-512_h-512_d-2_hdrop-0.3_indrop-0.2_0.001_lisa_offlineteacher_v1_student \ --withlr=False \ --chkpt_dir=tf_ckpts \ --logdir=logs ''' import os from tasks.sv_agreement import WordSvAgreementLM from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from util import constants from collections import Counter from tqdm import tqdm from tf2_models.metrics import * import numpy as np from absl import flags from absl import app from util.models import MODELS from util.text_util import gen_inflect_from_vocab FLAGS = flags.FLAGS flags.DEFINE_string('logdir', 'logs', ' log dir path') flags.DEFINE_string('chkpt_dir', 'chkpt_dir', ' chkpt_dir path') flags.DEFINE_string('prefix', 'prefix', ' prefix') flags.DEFINE_string('exp_name', 'tune_withl2_withpunc', 'tune_withl2_withpunc | withl2_batchsumloss_withpunc') flags.DEFINE_string('model_config', 'very_big_gpt_v10', 'big_gpt_v5 | very_big_gpt_v10| lstm_drop31_v2') flags.DEFINE_string('model_name', 'lm_gpt2_shared', 'lm_gpt2_shared | lm_gpt1 | lm_lstm_shared_emb') flags.DEFINE_string('train_config', 'adam_slw', ' adam_slw | radam_fst') flags.DEFINE_string('split', 'test', ' valid | test | train') flags.DEFINE_boolean('withlr', True, 'True | False') hparams = flags.FLAGS def compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total): ''' Computes and prints accuracy based on hits :param distance_hits: :param distance_total: :param diff_hits: :param diff_total: :return: None ''' dis_acc = np.zeros(16) dif_acc = np.zeros(5) total_nominator = 0.0 total_denominator = 0.0 print('Accuracy by distance') for k in sorted(distance_hits.keys()): v = distance_hits[k] acc = v / distance_total[k] dis_acc[k-1] = acc print("%d | %.2f" % (k, acc), distance_total[k]) total_nominator += v total_denominator += distance_total[k] print("Micro accuracy (distance):", total_nominator / total_denominator) print("Macro accuracy (distance):", np.mean(dis_acc)) print('Accuracy by intervenings:') total_nominator = 0.0 total_denominator = 0.0 for k in sorted(diff_hits.keys()): v = diff_hits[k] acc = v * 1. / diff_total[k] print("%d | %.2f" % (k, acc), diff_total[k]) dif_acc[k] = acc total_nominator += v total_denominator += diff_total[k] print("Micro accuracy (intervenings):", total_nominator / total_denominator) print("Macro accuracy (intervenings):", np.mean(dif_acc)) def evaluate_vp(model, task, split='test'): ''' Computes the accuracy statistics of the given model on the subject verb agreement task. 
:param model: the models to be evaluated :param task: :return: distance_hits, distance_total, diff_hits, diff_total ''' verb_infl, noun_infl = gen_inflect_from_vocab('data/tal_agreement/wiki.vocab') distance_hits = Counter() distance_total = Counter() diff_hits = Counter() diff_total = Counter() test_data = task.databuilder.as_dataset(split=split, batch_size=1000) for example in tqdm(test_data): encoded_sentences = example['sentence'] s_shape = tf.shape(encoded_sentences) batch_size, length = s_shape[0], s_shape[1] bos = tf.ones((batch_size, 1), dtype=tf.int64) * task.databuilder.sentence_encoder().encode(constants.bos) eos = tf.ones((batch_size, 1), dtype=tf.int64) * task.databuilder.sentence_encoder().encode(constants.eos) encoded_sentences = tf.concat([bos, encoded_sentences, eos], axis=1) actual_verbs = example['verb'] inflected_verbs = [verb_infl[v.decode("utf-8")] for v in actual_verbs.numpy()] verb_indexes = example['verb_position'] distances = example['distance'].numpy() nz = example['n_intervening'].numpy() n_diffs = example['n_diff_intervening'].numpy() actual_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in actual_verbs.numpy()] inflected_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in inflected_verbs] scores = model(encoded_sentences) actual_batch_indexes = [(i, verb_indexes[i], actual_verb_indexes[i]) for i in range(len(verb_indexes))] actual_scores = tf.compat.v2.gather_nd(scores, actual_batch_indexes) inflected_batch_indexes = [(i, verb_indexes[i], inflected_verb_indexes[i]) for i in range(len(verb_indexes))] infelected_scores = tf.compat.v2.gather_nd(scores, inflected_batch_indexes) corrects = actual_scores > infelected_scores for i, c in enumerate(corrects): if nz[i] > 4 or distances[i] > 16: continue distance_total[distances[i]] += 1 distance_hits[distances[i]] += int(c) if nz[i] == n_diffs[i]: n = nz[i] diff_total[n] += 1 diff_hits[n] += int(c) return distance_hits, distance_total, diff_hits, diff_total def main(argv): task = WordSvAgreementLM(task_params=get_task_params(), data_dir='data') # Create the Model model_params = get_model_params(task, hparams.model_name, hparams.model_config) print("model_params: ", model_params.__dict__) cl_token = task.databuilder.sentence_encoder().encode(constants.bos) model = MODELS[hparams.model_name](hparams=get_model_params(task, hparams.model_name, hparams.model_config), cl_token=cl_token) trainer_params = get_train_params(hparams.train_config) if len(hparams.prefix) > 0: hparams.prefix = hparams.prefix + "_" log_dir = os.path.join(hparams.logdir, task.name, hparams.prefix+model.model_name + "_" + str(hparams.model_config) + "_" + str( trainer_params.learning_rate) + "_" + hparams.exp_name) ckpt_dir = os.path.join(hparams.chkpt_dir, task.name, hparams.prefix+model.model_name + "_" + str(hparams.model_config) + "_" + ((str( trainer_params.learning_rate) + "_") if hparams.withlr else '') + hparams.exp_name) print(ckpt_dir) trainer = Trainer(task=task, model=model, train_params=trainer_params, log_dir=log_dir, ckpt_dir=ckpt_dir) trainer.restore() distance_hits, distance_total, diff_hits, diff_total = evaluate_vp(trainer.model, trainer.task, hparams.split) compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total) if __name__ == '__main__': app.run(main)
6,755
36.955056
135
py
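The core of evaluate_vp above is the scoring step: for every sentence, the language model's logit for the attested verb form is compared with the logit for its opposite inflection at the verb position, and the example counts as a hit when the attested form scores higher. A minimal reconstruction with random logits and invented positions and token ids:

import tensorflow as tf

batch_size, length, vocab_size = 2, 5, 10
scores = tf.random.normal([batch_size, length, vocab_size])    # stand-in for model(encoded_sentences)
verb_indexes = [3, 2]                                          # verb position in each sentence
actual_verb_indexes = [7, 1]                                   # token id of the attested verb form
inflected_verb_indexes = [4, 8]                                # token id of the opposite inflection

actual_batch_indexes = [(i, verb_indexes[i], actual_verb_indexes[i]) for i in range(batch_size)]
inflected_batch_indexes = [(i, verb_indexes[i], inflected_verb_indexes[i]) for i in range(batch_size)]

actual_scores = tf.gather_nd(scores, actual_batch_indexes)
inflected_scores = tf.gather_nd(scores, inflected_batch_indexes)
corrects = actual_scores > inflected_scores                    # one hit/miss flag per sentence
print(corrects.numpy())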
Reflect
Reflect-master/notebooks/notebook_utils.py
import tensorflow as tf import numpy as np import os from tqdm import tqdm from util import constants from collections import Counter from util.models import MODELS from util.tasks import TASKS from util.config_util import get_model_params, get_task_params, get_train_params import matplotlib.pyplot as plt import pandas as pd import seaborn as sns; sns.set() sns.set_style("whitegrid") from util import inflect dependency_fields = ['sentence', 'orig_sentence', 'pos_sentence', 'subj', 'verb', 'subj_pos', 'has_rel', 'has_nsubj', 'verb_pos', 'subj_index', 'verb_index', 'n_intervening', 'last_intervening', 'n_diff_intervening', 'distance', 'max_depth', 'all_nouns', 'nouns_up_to_verb'] def get_model(config, task, hparams, cl_token, **kwargs): model = MODELS[config['model_name']](hparams=hparams, cl_token=cl_token, **kwargs) ckpt_dir = os.path.join(config['chkpt_dir'],task.name, model.model_name+"_"+str(config['model_config'])+"_"+str(config['learning_rate'])+"_"+config['exp_name']) ckpt = tf.train.Checkpoint(net=model) manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None) ckpt.restore(manager.latest_checkpoint) if manager.latest_checkpoint: print("Restored student from {}".format(manager.latest_checkpoint)) else: print("No checkpoint found {}".format(ckpt_dir)) model.compile(loss=task.get_loss_fn(), metrics=task.metrics()) return model, ckpt def get_student_model(config, task, hparams, cl_token): teacher_model = MODELS[config['teacher_model']](hparams=get_model_params(task, config['teacher_model'], config['teacher_config']), cl_token=cl_token) model = MODELS[config['student_model']](hparams=hparams, cl_token=cl_token) ckpt_dir = os.path.join(config['chkpt_dir'], task.name, '_'.join([config['distill_mode'],config['distill_config'], "teacher", teacher_model.model_name, config['teacher_config'], config['teacher_exp_name'], "student",model.model_name, str(config['student_config']), config['student_exp_name']])) print("student_checkpoint:", ckpt_dir) ckpt = tf.train.Checkpoint(net=model) manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None) ckpt.restore(manager.latest_checkpoint) if manager.latest_checkpoint: print("Restored student from {}".format(manager.latest_checkpoint)) else: print("No checkpoint found {}".format(ckpt_dir)) model.compile(loss=task.get_loss_fn(), metrics=task.metrics()) return model, ckpt def get_teacher_model(config, task, hparams, cl_token): model = MODELS[config['teacher_model']](hparams=hparams, cl_token=cl_token) ckpt_dir = os.path.join(config['chkpt_dir'], task.name, '_'.join([model.model_name, config['teacher_config'],config['teacher_exp_name']])) ckpt = tf.train.Checkpoint(net=model) manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None) ckpt.restore(manager.latest_checkpoint) ckpt.restore(manager.latest_checkpoint) if manager.latest_checkpoint: print("Restored student from {}".format(manager.latest_checkpoint)) else: print("No checkpoint found {}".format(ckpt_dir)) model.compile(loss=task.get_loss_fn(), metrics=task.metrics()) return model, ckpt def gen_inflect_from_vocab(infl_eng, vocab_file, freq_threshold=1000): vbp = {} vbz = {} nn = {} nns = {} from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz} for line in open(vocab_file): if line.startswith(' '): # empty string token continue word, pos, count = line.strip().split() count = int(count) if len(word) > 1 and pos in from_pos and count >= freq_threshold: from_pos[pos][word] = count verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'} for word, count in vbz.items(): candidate = 
infl_eng.plural_verb(word) if candidate in vbp: verb_infl[candidate] = word verb_infl[word] = candidate noun_infl = {'NN': 'NNS', 'NNS': 'NN'} for word, count in nn.items(): candidate = infl_eng.plural_noun(word) if candidate in nns: noun_infl[candidate] = word noun_infl[word] = candidate return verb_infl, noun_infl def compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total): ''' Computes and prints accuracy based on hits :param distance_hits: :param distance_total: :param diff_hits: :param diff_total: :return: None ''' dis_acc = np.zeros(16) dif_acc = np.zeros(5) total_nominator = 0.0 total_denominator = 0.0 print('Accuracy by distance') for k in sorted(distance_hits.keys()): v = distance_hits[k] acc = v / distance_total[k] dis_acc[k-1] = acc print("%d | %.2f" % (k, acc), distance_total[k]) total_nominator += v total_denominator += distance_total[k] print("Micro accuracy (distance):", total_nominator / total_denominator) print("Macro accuracy (distance):", np.mean(dis_acc)) print('Accuracy by intervenings:') total_nominator = 0.0 total_denominator = 0.0 for k in sorted(diff_hits.keys()): v = diff_hits[k] acc = v * 1. / diff_total[k] print("%d | %.2f" % (k, acc), diff_total[k]) dif_acc[k] = acc total_nominator += v total_denominator += diff_total[k] print("Micro accuracy (intervenings):", total_nominator / total_denominator) print("Macro accuracy (intervenings):", np.mean(dif_acc)) def evaluate_vp_cl(model, verb_infl, noun_infl, task, split='test', batch_size=1000, cls=False): distance_hits = Counter() distance_total = Counter() diff_hits = Counter() diff_total = Counter() test_data = task.databuilder.as_dataset(split=split, batch_size=batch_size) e = 0 for examples in test_data: e += 1 sentences = examples['sentence'] #bos = tf.cast(task.databuilder.sentence_encoder().encode(constants.bos) * tf.ones((sentences.shape[0],1)), dtype=tf.int64) eos = tf.cast(task.databuilder.sentence_encoder().encode(constants.eos) *tf.ones((sentences.shape[0],1)), dtype=tf.int64) sentences = tf.concat([sentences, eos], axis=-1) verb_position = examples['verb_position']+int(cls) #+1 because of adding bos. 
# The verb it self is also masked mask = tf.cast(tf.sequence_mask(verb_position,maxlen=tf.shape(sentences)[1]), dtype=tf.int64) max_length = tf.reduce_max(verb_position + 1) last_index_mask = tf.gather(tf.eye(tf.shape(sentences)[1], dtype=tf.int64),verb_position) last_index_mask = last_index_mask * eos[0] inputs = (sentences * mask + last_index_mask)[:,:max_length] s_shape = tf.shape(inputs) batch_size, length = s_shape[0], s_shape[1] verb_classes = examples['verb_class'] actual_verbs = examples['verb'] distances = examples['distance'].numpy() nz = examples['n_intervening'].numpy() n_diffs = examples['n_diff_intervening'].numpy() actual_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in actual_verbs.numpy()] predictions = model(inputs, training=False) predictions = np.argmax(predictions, axis=-1) corrects = predictions == verb_classes for i, c in enumerate(corrects): if actual_verb_indexes[i] == 10035 or actual_verb_indexes[i] == 2: continue if nz[i] > 4 or distances[i] > 16: continue distance_total[distances[i]] += 1 distance_hits[distances[i]] += int(c) if nz[i] == n_diffs[i]: n = nz[i] diff_total[n] += 1 diff_hits[n] += int(c) return distance_hits, distance_total, diff_hits, diff_total def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues def plot_calibration(model_accuracy, predicted_class_probs, correct_class_probs, n_bins=10): p_confidence_bins = np.zeros(n_bins+1) n_confidence_bins = np.zeros(n_bins+1) total_confidence_bins = np.zeros(n_bins+1) denominator = 100.0 / n_bins for i in np.arange(len(model_accuracy)): if model_accuracy[i]: p_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] += 1.0 else: n_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] -= 1.0 total_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] += 1 #sns.stripplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5, jitter=True) #sns.stripplot(model_accuracy,correct_class_probs, color='green', alpha=0.2, jitter=True) #sns.swarmplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5) #plt.show() sns.barplot(x=np.arange(0,n_bins)*denominator, y=np.arange(0,n_bins)/n_bins, color='green', alpha=0.2, edgecolor='black') ax = sns.barplot(x=np.arange(0,n_bins)*denominator, y=p_confidence_bins[1:]/total_confidence_bins[1:], color='red', alpha=0.5, edgecolor='black') x_ticks = np.arange(0,n_bins,2) x_tick_labels = x_ticks / np.float32(n_bins) ax.set_xticks(x_ticks) ax.set_xticklabels(x_tick_labels, fontsize=10) def expected_calibration_error(teacher_accuracy, teacher_predicted_class_probs): raise NotImplemented
10,989
36.508532
153
py
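get_model, get_teacher_model and get_student_model above all share the same restore pattern: wrap the model in a tf.train.Checkpoint under the name net, point a CheckpointManager at the run's checkpoint directory, and restore latest_checkpoint if one exists. A bare-bones version with a placeholder model and directory:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4)])          # placeholder model
ckpt = tf.train.Checkpoint(net=model)
manager = tf.train.CheckpointManager(ckpt, "tf_ckpts/example_run", max_to_keep=None)

ckpt.restore(manager.latest_checkpoint)                           # no-op if nothing has been saved yet
if manager.latest_checkpoint:
    print("Restored from {}".format(manager.latest_checkpoint))
else:
    print("No checkpoint found in tf_ckpts/example_run")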
Reflect
Reflect-master/notebooks/calibration_util.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS import tensorflow_probability as tfp import matplotlib.pyplot as plt import pandas as pd import seaborn as sns; sns.set() sns.set_style("whitegrid") from tqdm import tqdm def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int64), dtype=tf.int64) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int64)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues def plot_calibration(model_accuracy, predicted_class_probs, correct_class_probs, n_bins=10): p_confidence_bins = np.zeros(n_bins) n_confidence_bins = np.zeros(n_bins) total_confidence_bins = np.zeros(n_bins) denominator = 100.0 / n_bins for i in np.arange(len(model_accuracy)): if model_accuracy[i]: p_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1.0 else: n_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] -= 1.0 total_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1 #sns.stripplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5, jitter=True) #sns.stripplot(model_accuracy,correct_class_probs, color='green', alpha=0.2, jitter=True) #sns.swarmplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5) #plt.show() sns.barplot(x=np.arange(0,n_bins)*denominator, y=np.arange(0,n_bins)/n_bins, color='green', alpha=0.2, edgecolor='black') ax = sns.barplot(x=np.arange(0,n_bins)*denominator, y=p_confidence_bins/total_confidence_bins, color='red', alpha=0.5, edgecolor='black') x_ticks = np.arange(0,n_bins,2) x_tick_labels = x_ticks / np.float32(n_bins) ax.set_xticks(x_ticks) ax.set_xticklabels(x_tick_labels, fontsize=10) return p_confidence_bins,n_confidence_bins,total_confidence_bins def expected_calibration_error(teacher_accuracy, teacher_predicted_class_probs): raise NotImplemented
3,258
38.26506
100
py
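The expected_calibration_error stub above is left unimplemented; a hand-rolled sketch of what such a routine would compute from the outputs of test_for_calibration is shown below (name, bin count, and input values are all invented for the example): bin predictions by confidence, compare each bin's accuracy with its mean confidence, and weight the absolute gaps by bin size.

import numpy as np

def ece_by_hand(model_accuracy, predicted_class_probs, n_bins=10):
    accuracy = np.asarray(model_accuracy, dtype=np.float64)
    confidence = np.asarray(predicted_class_probs, dtype=np.float64)
    bin_ids = np.minimum((confidence * n_bins).astype(int), n_bins - 1)
    ece = 0.0
    for b in range(n_bins):
        in_bin = bin_ids == b
        if not in_bin.any():
            continue
        gap = abs(accuracy[in_bin].mean() - confidence[in_bin].mean())
        ece += (in_bin.sum() / len(confidence)) * gap
    return ece

# Fabricated outputs of test_for_calibration: per-example correctness and the
# probability assigned to the predicted class.
acc = np.array([1, 0, 1, 1, 0, 1])
conf = np.array([0.90, 0.80, 0.65, 0.95, 0.55, 0.70])
print(ece_by_hand(acc, conf, n_bins=5))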
Reflect
Reflect-master/notebooks/eval_scripts/eval_vp.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) models = {} labels = [] config={'student_exp_name':'gc_f_std124', 'teacher_exp_name':'gc_o_tchr124', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token) models['lstm_124'] = teacher_model labels.append('lstm_124') config={'student_exp_name':'gc_f_std125', 'teacher_exp_name':'gc_o_tchr125', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token) models['lstm_125'] = teacher_model labels.append('lstm_125') config={'student_exp_name':'gc_f_std130', 'teacher_exp_name':'gc_o_tchr130', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token) models['lstm_130'] = teacher_model labels.append('lstm_130') config={'student_exp_name':'gc_f_std131', 'teacher_exp_name':'gc_o_tchr131', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token) models['lstm_131'] = teacher_model labels.append('lstm_131') keys = labels import tensorflow_probability as tfp def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: y = tf.cast(y, tf.int32) logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = 
tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues # for key in keys: # model = models[key] # train = model.evaluate(task.train_dataset, steps=task.n_train_batches) # valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches) # test = model.evaluate(task.test_dataset, steps=task.n_test_batches) # print(key) # print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2]) for key in keys: model = models[key] print('##################################') model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) model_ece = tfp.stats.expected_calibration_error( 1000000, logits=model_logits, labels_true=model_trues, ) print(model_ece.numpy())
5,193
28.68
139
py
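The evaluation loop above delegates calibration measurement to tfp.stats.expected_calibration_error; a tiny standalone call with made-up logits and labels makes the expected shapes explicit ([num_examples, num_classes] float logits, integer true labels):

import tensorflow as tf
import tensorflow_probability as tfp

logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [1.2, 1.1], [0.0, 3.0]])   # fake [N, 2] logits
labels = tf.constant([0, 1, 1, 1], dtype=tf.int32)                       # fake true classes

model_ece = tfp.stats.expected_calibration_error(
    10, logits=logits, labels_true=labels)
print(model_ece.numpy())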
Reflect
Reflect-master/notebooks/eval_scripts/eval_vp-bert.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm import logging tf.get_logger().setLevel(logging.ERROR) log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) students = [] models = [] labels = [] #Bert to LSTM config={'student_exp_name':'gc_f_std9303', 'teacher_exp_name':'gc_o_tchr8323', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp9', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('bert2lstm_1') config={'student_exp_name':'gc_f_std9304', 'teacher_exp_name':'gc_o_tchr8324', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp9', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('bert2lstm_2') config={'student_exp_name':'gc_f_std9301', 'teacher_exp_name':'gc_o_tchr9301', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp9', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) 
labels.append('bert2lstm_3') config={'student_exp_name':'gc_f_std9302', 'teacher_exp_name':'gc_o_tchr9302', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp9', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('bert2lstm_4') config={'student_exp_name':'gc_f_std8331', 'teacher_exp_name':'gc_o_tchr8321', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_gpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2ugpt_1') config={'student_exp_name':'gc_f_std8332', 'teacher_exp_name':'gc_o_tchr8322', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_gpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2ugpt_2') config={'student_exp_name':'gc_f_std8333', 'teacher_exp_name':'gc_o_tchr8323', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_gpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) 
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2ugpt_3') config={'student_exp_name':'gc_f_std8334', 'teacher_exp_name':'gc_o_tchr8324', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_gpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2ugpt_4') config={'student_exp_name':'gc_f_std8311', 'teacher_exp_name':'gc_o_tchr8311', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_bert', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2bert_1') config={'student_exp_name':'gc_f_std8312', 'teacher_exp_name':'gc_o_tchr8322', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_bert', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2bert_2') config={'student_exp_name':'gc_f_std8313', 'teacher_exp_name':'gc_o_tchr8323', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_bert', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, 
config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2bert_3') config={'student_exp_name':'gc_f_std8314', 'teacher_exp_name':'gc_o_tchr8324', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_bert', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2bert_4') config={'student_exp_name':'gc_f_std8321', 'teacher_exp_name':'gc_o_tchr8321', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2gpt_1') config={'student_exp_name':'gc_f_std8322', 'teacher_exp_name':'gc_o_tchr8322', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2gpt_2') config={'student_exp_name':'gc_f_std8323', 'teacher_exp_name':'gc_o_tchr8323', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 
'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2gpt_3') config={'student_exp_name':'gc_f_std8324', 'teacher_exp_name':'gc_o_tchr8324', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_gpt2', 'teacher_config':'small_gpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp8', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model[0]) labels.append('bert2gpt_4') keys = labels import tensorflow_probability as tfp def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: y = tf.cast(y, tf.int32) logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues for key,model in zip(labels, students): print('##################################') train = model.evaluate(task.train_dataset, steps=task.n_train_batches,verbose=0) valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches,verbose=0) test = model.evaluate(task.test_dataset, steps=task.n_test_batches,verbose=0) print(key) print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2]) # print("Teachers:") # for key,model in zip(labels, models): # model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) # model_ece = tfp.stats.expected_calibration_error( # 20, # logits=model_logits, # labels_true=model_trues, # ) # print(key, model_ece.numpy()) print("Students:") for key,model in zip(labels, students): print('##################################') model_accuracy, 
predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) model_ece = tfp.stats.expected_calibration_error( 20, logits=model_logits, labels_true=model_trues, ) print(key, model_ece.numpy()) # infl_eng = inflect.engine() # verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') # print(labels) # for key,model in zip(labels, models): # print(key) # distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) # compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
19,508
33.962366
137
py
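The evaluation scripts collected above all measure calibration through tfp.stats.expected_calibration_error on validation logits. The short sketch below is not part of any repository in this corpus; it only illustrates, on toy arrays, the shape of that call (assuming tensorflow and tensorflow_probability are installed).

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

# Toy logits and labels standing in for a model's validation outputs (illustrative only).
logits = tf.constant(np.random.randn(8, 2), dtype=tf.float32)
labels_true = tf.constant(np.random.randint(0, 2, size=8), dtype=tf.int32)

# 20 confidence bins, matching the n_bins used in the scripts above.
ece = tfp.stats.expected_calibration_error(20, logits=logits, labels_true=labels_true)
print(float(ece))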
Reflect
Reflect-master/notebooks/eval_scripts/eval_vp-ugpt.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm import logging tf.get_logger().setLevel(logging.ERROR) log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) models = [] students = [] labels = [] #Bert to LSTM config={'student_exp_name':'gc_f_std4104', 'teacher_exp_name':'gc_o_tchr4112', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_ugpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2lstm_1') config={'student_exp_name':'gc_f_std4103', 'teacher_exp_name':'gc_o_tchr4113', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_ugpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2lstm_2') config={'student_exp_name':'gc_f_std4102', 'teacher_exp_name':'gc_o_tchr4102', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_ugpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) 
labels.append('ugpt2lstm_3') config={'student_exp_name':'gc_f_std4101', 'teacher_exp_name':'gc_o_tch4101', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_ugpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2lstm_4') config={'student_exp_name':'gc_f_std4131', 'teacher_exp_name':'gc_o_tchr4131', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_ugpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2ugpt_1') config={'student_exp_name':'gc_f_std4132', 'teacher_exp_name':'gc_o_tchr4132', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_ugpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2ugpt_2') config={'student_exp_name':'gc_f_std4130', 'teacher_exp_name':'gc_o_tchr4130', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_ugpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, 
cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2ugpt_3') config={'student_exp_name':'gc_f_std4133', 'teacher_exp_name':'gc_o_tchr4123', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2_shared', 'teacher_config':'small_ugpt_v9', 'student_config':'small_ugpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2ugpt_4') config={'student_exp_name':'gc_f_std4110', 'teacher_exp_name':'gc_o_tchr4110', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_bert', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2bert_1') config={'student_exp_name':'gc_f_std4111', 'teacher_exp_name':'gc_o_tchr4111', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_bert', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2bert_2') config={'student_exp_name':'gc_f_std4112', 'teacher_exp_name':'gc_o_tchr4112', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_bert', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') 
std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2bert_3') config={'student_exp_name':'gc_f_std4113', 'teacher_exp_name':'gc_o_tchr4113', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_bert', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2bert_4') config={'student_exp_name':'gc_f_std4120', 'teacher_exp_name':'gc_o_tchr4120', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2gpt_1') config={'student_exp_name':'gc_f_std4121', 'teacher_exp_name':'gc_o_tchr4121', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2gpt_2') config={'student_exp_name':'gc_f_std4122', 'teacher_exp_name':'gc_o_tchr4122', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_gpt2', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 
'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2gpt_3') config={'student_exp_name':'gc_f_std4123', 'teacher_exp_name':'gc_o_tchr4123', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_gpt2', 'teacher_config':'small_ugpt_v9', 'student_config':'small_gpt_v9', 'distill_config':'pure_dstl_4_exp_vp4', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) students.append(student_model) models.append(teacher_model) labels.append('ugpt2gpt_4') keys = labels import tensorflow_probability as tfp def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: y = tf.cast(y, tf.int32) logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues # for key,model in zip(labels,models): # model = model[0] # train = model.evaluate(task.train_dataset, steps=task.n_train_batches, verbose=0) # valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches, verbose=0) # test = model.evaluate(task.test_dataset, steps=task.n_test_batches, verbose=0) # print(key) # print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2]) print("Teachers ****") for key,model in zip(labels,models): model = model[0] print('##################################') print(key) model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) model_ece = tfp.stats.expected_calibration_error( 20, logits=model_logits, labels_true=model_trues, ) print(model_ece.numpy()) print("Students ****") 
for key,model in zip(labels,students): print('##################################') print(key) model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) model_ece = tfp.stats.expected_calibration_error( 20, logits=model_logits, labels_true=model_trues, ) print(model_ece.numpy()) # infl_eng = inflect.engine() # verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') # print(labels) # for key,model in zip(labels,students): # print(key) # distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) # compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
19,655
33.851064
139
py
Reflect
Reflect-master/notebooks/eval_scripts/eval_vp-lstm.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm import logging tf.get_logger().setLevel(logging.ERROR) log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) models = [] labels = [] #Bert to LSTM config={'student_exp_name':'gc_f_std5004', 'teacher_exp_name':'gc_o_tchr5021', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp5', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) models.append(teacher_model) labels.append('lstm2lstm_1') config={'student_exp_name':'gc_f_std5001', 'teacher_exp_name':'gc_o_tchr5011', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp5', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) models.append(teacher_model) labels.append('lstm2lstm_2') config={'student_exp_name':'gc_f_std5002', 'teacher_exp_name':'gc_o_tchr5020', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp5', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) models.append(teacher_model) labels.append('lstm2lstm_3') config={'student_exp_name':'gc_f_std5003', 'teacher_exp_name':'gc_o_tchr5030', 
'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_exp_vp5', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # config['distill_mode'] = 'online' # config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_') std_hparams=get_model_params(task, config['student_model'], config['student_config']) std_hparams.output_attentions = True std_hparams.output_embeddings = True student_model, ckpt = get_student_model(config, task, std_hparams, cl_token) tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token) models.append(teacher_model) labels.append('lstm2lstm_4') keys = labels import tensorflow_probability as tfp def test_for_calibration(model, task, n_bins=10): preds = [] correct_class_probs = [] predicted_class_probs = [] pred_logits = [] y_trues = [] batch_count = task.n_valid_batches for x, y in task.valid_dataset: y = tf.cast(y, tf.int32) logits = model(x) pred_logits.extend(logits.numpy()) pred = tf.argmax(logits, axis=-1) prob = task.get_probs_fn()(logits, labels=y, temperature=1) preds.extend(pred.numpy()) y_trues.extend(y.numpy()) batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32) true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1) pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1) correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy()) predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy()) batch_count -= 1 if batch_count == 0: break model_accuracy = np.asarray(preds) == np.asarray(y_trues) return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues # for key in keys: # model = models[key] # train = model.evaluate(task.train_dataset, steps=task.n_train_batches) # valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches) # test = model.evaluate(task.test_dataset, steps=task.n_test_batches) # print(key) # print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2]) # for key in keys: # model = models[key] # model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20) # model_ece = tfp.stats.expected_calibration_error( # 1000000, # logits=model_logits, # labels_true=model_trues, # ) # print(model_ece.numpy()) infl_eng = inflect.engine() verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') print(labels) for key,model in zip(labels,models): model = model[0] print('##################################') print(key) distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
7,129
32.009259
139
py
Reflect
Reflect-master/notebooks/eval_scripts/eval_full_sv_cl.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) modelz = {} ckptz = {} config={'student_exp_name':'gc_f_std124', 'teacher_exp_name':'gc_o_tchr124', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['l2l_std124'] = model ckptz['l2l_std124'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['l2l_tchr124'] = teacher_model ckptz['l2l_tchr124'] = teacger_ckpt config={'student_exp_name':'gc_f_std125', 'teacher_exp_name':'gc_o_tchr125', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['l2l_std125'] = model ckptz['l2l_std125'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['l2l_tchr125'] = teacher_model ckptz['l2l_tchr125'] = teacger_ckpt config={'student_exp_name':'gc_f_std130', 'teacher_exp_name':'gc_o_tchr130', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['l2l_std130'] = model ckptz['l2l_std130'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['l2l_tchr130'] = teacher_model ckptz['l2l_tchr130'] = teacger_ckpt config={'student_exp_name':'gc_f_std131', 'teacher_exp_name':'gc_o_tchr131', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_lstm', 'student_model':'cl_lstm', 'teacher_config':'small_lstm_v4', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['l2l_std131'] = model 
ckptz['l2l_std131'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['l2l_tchr131'] = teacher_model ckptz['l2l_tchr131'] = teacger_ckpt infl_eng = inflect.engine() verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') keys = modelz.keys() for key in keys: model = modelz[key] print('##################################') print(key, ckptz[key]) distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
4,451
29.285714
108
py
Reflect
Reflect-master/notebooks/eval_scripts/eval_lm.py
import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np

from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
from tqdm import tqdm

log_dir = "../logs"
chkpt_dir = "../tf_ckpts"

task = TASKS['word_sv_agreement_lm'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)

modelz = {}
ckptz = {}

config = {'model_name':'lm_lstm_shared_emb',
          'model_config':'lstm_drop31_v2',
          'learning_rate':0.001,
          'exp_name':'lisa_crs_fst_offlineteacher_v23',
          'chkpt_dir': '../tf_ckpts'
          }

hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True

lstm1, lstm_ckpt1 = get_model(config, task, hparams, cl_token)
modelz['lstm1'] = lstm1
ckptz['lstm1'] = lstm_ckpt1

config = {'model_name':'lm_lstm_shared_emb',
          'model_config':'lstm_drop31_v2',
          'learning_rate':0.001,
          'exp_name':'lisa_crs_fst_offlineteacher_v24',
          'chkpt_dir': '../tf_ckpts'
          }

hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True

lstm2, lstm_ckpt2 = get_model(config, task, hparams, cl_token)
modelz['lstm2'] = lstm2
ckptz['lstm2'] = lstm_ckpt2

config = {'model_name':'lm_lstm_shared_emb',
          'model_config':'lstm_drop31_v2',
          'learning_rate':0.001,
          'exp_name':'lisa_crs_fst_offlineteacher_v25',
          'chkpt_dir': '../tf_ckpts'
          }

hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True

lstm3, lstm_ckpt3 = get_model(config, task, hparams, cl_token)
modelz['lstm3'] = lstm3
ckptz['lstm3'] = lstm_ckpt3

keys = ['lstm1', 'lstm2']

print("Evaluations ...")
for key in keys:
    model = modelz[key]
    print('##################################')
    print(ckptz[key])
    train = model.evaluate(task.train_dataset, steps=task.n_train_batches)
    valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches)
    test = model.evaluate(task.test_dataset, steps=task.n_test_batches)
    print("train:", train)
    print("valid:", valid)
    print("test:", test)
2,524
28.360465
86
py
Reflect
Reflect-master/notebooks/eval_scripts/eval_full_sv_cl_gpt2.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) modelz = {} ckptz = {} config={'student_exp_name':'gc_f_std144', 'teacher_exp_name':'gc_o_tchr144', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # std_hparams=get_model_params(task, config['student_model'], config['student_config']) # model, ckpt = get_student_model(config, task, std_hparams, cl_token) # modelz['g2l_std144'] = model # ckptz['g2l_std144'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['ug2l_tchr144'] = teacher_model ckptz['ug2l_tchr144'] = teacger_ckpt config={'student_exp_name':'gc_f_std145', 'teacher_exp_name':'gc_o_tchr145', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # std_hparams=get_model_params(task, config['student_model'], config['student_config']) # model, ckpt = get_student_model(config, task, std_hparams, cl_token) # modelz['g2l_std145'] = model # ckptz['g2l_std145'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['ug2l_tchr145'] = teacher_model ckptz['ug2l_tchr145'] = teacger_ckpt config={'student_exp_name':'gc_f_std146', 'teacher_exp_name':'gc_o_tchr146', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # std_hparams=get_model_params(task, config['student_model'], config['student_config']) # model, ckpt = get_student_model(config, task, std_hparams, cl_token) # modelz['g2l_std146'] = model # ckptz['g2l_std146'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['ug2l_tchr146'] = teacher_model ckptz['ug2l_tchr146'] = teacger_ckpt config={'student_exp_name':'gc_f_std147', 'teacher_exp_name':'gc_o_tchr147', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_gpt2_shared', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } # std_hparams=get_model_params(task, config['student_model'], config['student_config']) # model, ckpt = get_student_model(config, task, 
std_hparams, cl_token) # modelz['g2l_std147'] = model # ckptz['g2l_std147'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['ug2l_tchr147'] = teacher_model ckptz['ug2l_tchr147'] = teacger_ckpt infl_eng = inflect.engine() verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') keys = modelz.keys() for key in keys: model = modelz[key] print('##################################') print(key, ckptz[key]) distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
4,517
29.322148
108
py
Reflect
Reflect-master/notebooks/eval_scripts/eval_full_sv_cl_bert.py
import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags import numpy as np from util.models import MODELS from util.tasks import TASKS from notebook_utils import * import pandas as pd import seaborn as sns; sns.set() from collections import Counter from tqdm import tqdm log_dir = "../logs" chkpt_dir = "../tf_ckpts" task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data') cl_token = task.databuilder.sentence_encoder().encode(constants.bos) modelz = {} ckptz = {} config={'student_exp_name':'gc_f_std100', 'teacher_exp_name':'gc_o_tchr100', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['b2l_std100'] = model ckptz['b2l_std100'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['b2l_tchr100'] = teacher_model ckptz['b2l_tchr100'] = teacger_ckpt config={'student_exp_name':'gc_f_std101', 'teacher_exp_name':'gc_o_tchr101', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['b2l_std101'] = model ckptz['b2l_std101'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['b2l_tchr101'] = teacher_model ckptz['b2l_tchr101'] = teacger_ckpt config={'student_exp_name':'gc_f_std102', 'teacher_exp_name':'gc_o_tchr102', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['l2l_std102'] = model ckptz['l2l_std102'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['l2l_tchr102'] = teacher_model ckptz['l2l_tchr102'] = teacger_ckpt config={'student_exp_name':'gc_f_std103', 'teacher_exp_name':'gc_o_tchr103', 'task_name':'word_sv_agreement_vp', 'teacher_model':'cl_bert', 'student_model':'cl_lstm', 'teacher_config':'small_gpt_v9', 'student_config':'small_lstm_v4', 'distill_config':'pure_dstl_4_crs_slw', 'distill_mode':'offline', 'chkpt_dir':'../tf_ckpts', } std_hparams=get_model_params(task, config['student_model'], config['student_config']) model, ckpt = get_student_model(config, task, std_hparams, cl_token) modelz['b2l_std103'] = model 
ckptz['b2l_std103'] = ckpt tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config']) teacher_model, teacger_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token) modelz['b2l_tchr103'] = teacher_model ckptz['b2l_tchr103'] = teacger_ckpt infl_eng = inflect.engine() verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab') keys = modelz.keys() for key in keys: model = modelz[key] print('##################################') print(key, ckptz[key]) distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task) compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
4,447
29.258503
108
py
Reflect
Reflect-master/prep_data/split.py
import sys
import os
import errno
import random

from util.text_util import deps_from_tsv, deps_to_tsv


def make_splits(fname, expr_dir, prop_train=0.1, prop_valid=0.01):
    # for reproducibility
    random.seed(42)

    print('| read in the data')
    data = deps_from_tsv(fname)

    print('| shuffling')
    random.shuffle(data)

    n_train = int(len(data) * prop_train)
    n_valid = int(len(data) * prop_valid)
    train = data[:n_train]
    valid = data[n_train: n_train+n_valid]
    test = data[n_train+n_valid:]

    try:
        os.mkdir(expr_dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
        pass

    print('| splitting')
    deps_to_tsv(train, os.path.join(expr_dir, 'train.tsv'))
    deps_to_tsv(valid, os.path.join(expr_dir, 'valid.tsv'))
    deps_to_tsv(test, os.path.join(expr_dir, 'test.tsv'))
    print('| done!')


if __name__ == '__main__':
    make_splits(sys.argv[1], sys.argv[2])
947
25.333333
66
py
Reflect
Reflect-master/prep_data/gen_bowman_logic.py
from itertools import chain
from itertools import combinations
from collections import Counter
import random


def powerset(iterable):
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))


def get_candidate_worlds(num_vars):
    return powerset(set(range(num_vars)))


def get_satisfying_worlds_for_tree(tree, candidate_worlds):
    if isinstance(tree, tuple):
        if tree[0] == 'not':
            child = get_satisfying_worlds_for_tree(tree[1], candidate_worlds)
            return candidate_worlds.difference(child)
        else:
            left = get_satisfying_worlds_for_tree(tree[0], candidate_worlds)
            right = get_satisfying_worlds_for_tree(tree[2], candidate_worlds)
            if tree[1] == "and":
                return left.intersection(right)
            elif tree[1] == "or":
                return left.union(right)
            else:
                print('syntax error', tree)
    else:
        result = []
        for world in candidate_worlds:
            if tree in world:
                result.append(world)
        return set(result)


def compute_relation(left, right, universe):
    ne_intersection = left.intersection(right)
    ne_just_left = left.difference(right)
    ne_just_right = right.difference(left)
    ne_outside = universe.difference(left.union(right))
    if (ne_intersection and not ne_just_right and not ne_just_left and ne_outside):
        return "="
    elif (ne_intersection and ne_just_right and not ne_just_left and ne_outside):
        return "<"
    elif (ne_intersection and not ne_just_right and ne_just_left and ne_outside):
        return ">"
    elif (not ne_intersection and ne_just_right and ne_just_left and not ne_outside):
        return "^"
    elif (not ne_intersection and ne_just_right and ne_just_left and ne_outside):
        return "|"
    elif (ne_intersection and ne_just_right and ne_just_left and not ne_outside):
        return "v"
    else:
        return "#"


def create_sub_statement(universe, maxlen):
    operator = random.choice(operators)
    temp = ()
    if operator == '0' or maxlen < 2:
        temp = random.choice(list(universe))
    else:
        lhs = create_sub_statement(universe, maxlen / 2)
        rhs = create_sub_statement(universe, maxlen / 2)
        temp = tuple([lhs, operator, rhs])

    neg_or_none = random.choice(neg_or_nones)
    if neg_or_none == '0':
        return temp
    else:
        return tuple([neg_or_none, temp])


def uniq(seq, idfun=None):
    # order preserving
    if idfun is None:
        def idfun(x):
            return x
    seen = {}
    result = []
    for item in seq:
        marker = idfun(item)
        # in old Python versions:
        # if seen.has_key(marker)
        # but in new ones:
        if marker in seen:
            continue
        seen[marker] = 1
        result.append(item)
    return result


def to_string(expr, individuals):
    if isinstance(expr, int):
        return individuals[expr]
    if isinstance(expr, str):
        return expr
    elif len(expr) == 3:
        return "( " + to_string(expr[0], individuals) \
            + " ( " + to_string(expr[1], individuals) \
            + " " + to_string(expr[2], individuals) + " ) )"
    else:
        return "( " + to_string(expr[0], individuals) \
            + " " + to_string(expr[1], individuals) + " )"


def get_len(tree):
    if isinstance(tree, tuple):
        accum = 0
        for entry in tree:
            accum += get_len(entry)
        return accum
    elif tree == 'and' or tree == 'or' or tree == 'not':
        return 1
    else:
        return 0


individuals = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
worlds = set(get_candidate_worlds(6))
universe = set(range(6))
neg_or_nones = ['not', '0', '0']
operators = ['and', 'or', 'and', 'or', '0', '0', '0', '0', '0']

stats = Counter()
total = 0
outputs = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [],
           7: [], 8: [], 9: [], 10: [], 11: [], 12: []}

while total < 600000:
    subuniverse = random.sample(universe, 4)
    lhs = create_sub_statement(subuniverse, 12)
    rhs = create_sub_statement(subuniverse, 12)
    sat1 = get_satisfying_worlds_for_tree(lhs, worlds)
    sat2 = get_satisfying_worlds_for_tree(rhs, worlds)
    if sat1 == worlds or len(sat1) == 0:
        continue
    if sat2 == worlds or len(sat2) == 0:
        continue

    rel = compute_relation(sat1, sat2, worlds)

    if rel != "?":
        stats[rel] += 1
        total += 1
        max_len = min(max(get_len(rhs), get_len(lhs)), 12)
        outputs[max_len].append("" + rel + "\t" + to_string(
            lhs, individuals) + "\t" + to_string(rhs, individuals))

TRAIN_PORTION = 0.8
VALID_PORTION = 0.1

for length in outputs.keys():
    outputs[length] = uniq(outputs[length])
    total = len(outputs[length])

    filename = 'train' + str(length)
    f = open(filename, 'w')
    for i in range(int(TRAIN_PORTION * total)):
        output = outputs[length][i]
        f.write(output + "\n")
    f.close()

    filename = 'valid' + str(length)
    f = open(filename, 'w')
    validx = int((TRAIN_PORTION + VALID_PORTION) * total)
    for i in range(int(TRAIN_PORTION * total), validx):
        output = outputs[length][i]
        f.write(output + "\n")
    f.close()

    filename = 'test' + str(length)
    f = open(filename, 'w')
    for i in range(validx, total):
        output = outputs[length][i]
        f.write(output + "\n")
    f.close()

print(stats)
5,553
28.386243
77
py
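A small illustration of the set semantics behind gen_bowman_logic.py above: each formula is mapped to the set of "worlds" (truth assignments) satisfying it, and the relation label is read off from how the two sets overlap. The snippet below is a sketch, not repository code; the helper is inlined so it runs on its own, and the quoted ">" is the label compute_relation in the script assigns to this configuration.

from itertools import chain, combinations

def powerset(iterable):
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

universe = set(range(3))
worlds = set(powerset(universe))                  # all truth assignments over 3 atoms
left = {w for w in worlds if 0 in w}              # worlds satisfying "a"
right = {w for w in worlds if 0 in w and 1 in w}  # worlds satisfying "( a and b )"

# right is a strict subset of left and some worlds satisfy neither formula,
# so compute_relation(left, right, worlds) in the script labels this pair ">".
print(right < left)  # True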
Reflect
Reflect-master/prep_data/build_dictionary.py
from util import text_util as utils
from util import constants
from sys import argv
import numpy as np
import os


def build_and_save_dic(input_file, data_dir):
    worddict = {}
    worddict[constants.pad] = constants.pad_idx
    worddict[constants.unk] = constants.unk_idx
    worddict[constants.bos] = constants.bos_idx
    worddict[constants.eos] = constants.eos_idx

    input_file = os.path.join(data_dir, input_file)
    for dep in utils.deps_from_tsv(input_file):
        for w in dep['sentence'].split():
            if w not in worddict:
                worddict[w] = len(worddict)

    vocab_file = os.path.join(data_dir, 'vocab')
    print('| write vocabulary to %s' % vocab_file)
    np.save(vocab_file, arr=worddict)
    print('| vocabulary size %d' % len(worddict))
    print('| done!')


if __name__ == '__main__':
    data_dir = argv[1]
    input_file = argv[2]

    build_and_save_dic(input_file=input_file, data_dir=data_dir)
969
26.714286
51
py
PyKrige
PyKrige-main/setup.py
"""Kriging Toolkit for Python.""" import os import numpy as np from Cython.Build import cythonize from setuptools import Extension, setup # cython extensions CY_MODULES = [ Extension( name=f"pykrige.{ext}", sources=[os.path.join("src", "pykrige", *ext.split(".")) + ".pyx"], include_dirs=[np.get_include()], define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")], ) for ext in ["lib.cok", "lib.variogram_models"] ] # setup - do not include package data to ignore .pyx files in wheels setup(ext_modules=cythonize(CY_MODULES), include_package_data=False)
634
27.863636
75
py
PyKrige
PyKrige-main/benchmarks/kriging_benchmarks.py
"""Benchmarks.""" from time import time import numpy as np from pykrige.ok import OrdinaryKriging np.random.seed(19999) VARIOGRAM_MODELS = ["power", "gaussian", "spherical", "exponential", "linear"] BACKENDS = ["vectorized", "loop", "C"] N_MOVING_WINDOW = [None, 10, 50, 100] def make_benchark(n_train, n_test, n_dim=2): """Compute the benchmarks for Ordianry Kriging. Parameters ---------- n_train : int number of points in the training set n_test : int number of points in the test set n_dim : int number of dimensions (default=2) Returns ------- res : dict a dictionary with the timing results """ X_train = np.random.rand(n_train, n_dim) y_train = np.random.rand(n_train) X_test = np.random.rand(n_test, n_dim) res = {} for variogram_model in VARIOGRAM_MODELS: tic = time() OK = OrdinaryKriging( X_train[:, 0], X_train[:, 1], y_train, variogram_model="linear", verbose=False, enable_plotting=False, ) res["t_train_{}".format(variogram_model)] = time() - tic # All the following tests are performed with the linear variogram model for backend in BACKENDS: for n_closest_points in N_MOVING_WINDOW: if backend == "vectorized" and n_closest_points is not None: continue # this is not supported tic = time() OK.execute( "points", X_test[:, 0], X_test[:, 1], backend=backend, n_closest_points=n_closest_points, ) res["t_test_{}_{}".format(backend, n_closest_points)] = time() - tic return res def print_benchmark(n_train, n_test, n_dim, res): """Print the benchmarks. Parameters ---------- n_train : int number of points in the training set n_test : int number of points in the test set n_dim : int number of dimensions (default=2) res : dict a dictionary with the timing results """ print("=" * 80) print(" " * 10, "N_dim={}, N_train={}, N_test={}".format(n_dim, n_train, n_test)) print("=" * 80) print("\n", "# Training the model", "\n") print("|".join(["{:>11} ".format(el) for el in ["t_train (s)"] + VARIOGRAM_MODELS])) print("-" * (11 + 2) * (len(VARIOGRAM_MODELS) + 1)) print( "|".join( ["{:>11} ".format("Training")] + [ "{:>11.2} ".format(el) for el in [res["t_train_{}".format(mod)] for mod in VARIOGRAM_MODELS] ] ) ) print("\n", "# Predicting kriging points", "\n") print("|".join(["{:>11} ".format(el) for el in ["t_test (s)"] + BACKENDS])) print("-" * (11 + 2) * (len(BACKENDS) + 1)) for n_closest_points in N_MOVING_WINDOW: timing_results = [ res.get("t_test_{}_{}".format(mod, n_closest_points), "") for mod in BACKENDS ] print( "|".join( ["{:>11} ".format("N_nn=" + str(n_closest_points))] + ["{:>11.2} ".format(el) for el in timing_results] ) ) if __name__ == "__main__": for no_train, no_test in [(400, 1000), (400, 2000), (800, 2000)]: results = make_benchark(no_train, no_test) print_benchmark(no_train, no_test, 2, results)
3,473
27.47541
88
py
PyKrige
PyKrige-main/examples/06_exact_values_example_1D.py
""" Exact Values ============ PyKrige demonstration and usage as a non-exact interpolator in 1D. """ import matplotlib.pyplot as plt import numpy as np from pykrige.ok import OrdinaryKriging plt.style.use("ggplot") np.random.seed(42) x = np.linspace(0, 12.5, 50) xpred = np.linspace(0, 12.5, 393) y = np.sin(x) * np.exp(-0.25 * x) + np.random.normal(-0.25, 0.25, 50) # compare OrdinaryKriging as an exact and non exact interpolator uk = OrdinaryKriging( x, np.zeros(x.shape), y, variogram_model="linear", exact_values=False ) uk_exact = OrdinaryKriging(x, np.zeros(x.shape), y, variogram_model="linear") y_pred, y_std = uk.execute("grid", xpred, np.array([0.0]), backend="loop") y_pred_exact, y_std_exact = uk_exact.execute( "grid", xpred, np.array([0.0]), backend="loop" ) y_pred = np.squeeze(y_pred) y_std = np.squeeze(y_std) y_pred_exact = np.squeeze(y_pred_exact) y_std_exact = np.squeeze(y_std_exact) fig, ax = plt.subplots(1, 1, figsize=(10, 4)) ax.scatter(x, y, label="Input Data") ax.plot(xpred, y_pred_exact, label="Exact Prediction") ax.plot(xpred, y_pred, label="Non Exact Prediction") ax.fill_between( xpred, y_pred - 3 * y_std, y_pred + 3 * y_std, alpha=0.3, label="Confidence interval", ) ax.legend(loc=9) ax.set_ylim(-1.8, 1.3) ax.legend(loc=9) plt.xlabel("X") plt.ylabel("Field") plt.show()
1,375
21.557377
77
py
PyKrige
PyKrige-main/examples/00_ordinary.py
""" Ordinary Kriging Example ======================== First we will create a 2D dataset together with the associated x, y grids. """ import matplotlib.pyplot as plt import numpy as np import pykrige.kriging_tools as kt from pykrige.ok import OrdinaryKriging data = np.array( [ [0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74], [3.3, 4.4, 1.47], [4.7, 3.8, 1.74], ] ) gridx = np.arange(0.0, 5.5, 0.5) gridy = np.arange(0.0, 5.5, 0.5) # Create the ordinary kriging object. Required inputs are the X-coordinates of # the data points, the Y-coordinates of the data points, and the Z-values of the # data points. If no variogram model is specified, defaults to a linear variogram # model. If no variogram model parameters are specified, then the code automatically # calculates the parameters by fitting the variogram model to the binned # experimental semivariogram. The verbose kwarg controls code talk-back, and # the enable_plotting kwarg controls the display of the semivariogram. OK = OrdinaryKriging( data[:, 0], data[:, 1], data[:, 2], variogram_model="linear", verbose=False, enable_plotting=False, ) # Creates the kriged grid and the variance grid. Allows for kriging on a rectangular # grid of points, on a masked rectangular grid of points, or with arbitrary points. # (See OrdinaryKriging.__doc__ for more information.) z, ss = OK.execute("grid", gridx, gridy) # Writes the kriged grid to an ASCII grid file and plot it. kt.write_asc_grid(gridx, gridy, z, filename="output.asc") plt.imshow(z) plt.show()
1,840
30.20339
84
py
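The ordinary-kriging example above evaluates on a regular grid; OrdinaryKriging.execute also accepts the "points" style for arbitrary locations. The sketch below reuses the same toy data, with query coordinates made up for illustration.

import numpy as np
from pykrige.ok import OrdinaryKriging

data = np.array(
    [[0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74], [3.3, 4.4, 1.47], [4.7, 3.8, 1.74]]
)
OK = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="linear")

# "points" pairs the i-th x with the i-th y instead of forming a grid.
xpts = np.array([0.5, 2.0, 3.1])
ypts = np.array([1.0, 1.5, 4.0])
z, ss = OK.execute("points", xpts, ypts)
print(z, ss)  # kriged estimates and variances at the three locations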
PyKrige
PyKrige-main/examples/07_regression_kriging2d.py
""" Regression kriging ------------------ An example of regression kriging """ import sys from sklearn.datasets import fetch_california_housing from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.svm import SVR from pykrige.rk import RegressionKriging svr_model = SVR(C=0.1, gamma="auto") rf_model = RandomForestRegressor(n_estimators=100) lr_model = LinearRegression(normalize=True, copy_X=True, fit_intercept=False) models = [svr_model, rf_model, lr_model] try: housing = fetch_california_housing() except PermissionError: # this dataset can occasionally fail to download on Windows sys.exit(0) # take the first 5000 as Kriging is memory intensive p = housing["data"][:5000, :-2] x = housing["data"][:5000, -2:] target = housing["target"][:5000] p_train, p_test, x_train, x_test, target_train, target_test = train_test_split( p, x, target, test_size=0.3, random_state=42 ) for m in models: print("=" * 40) print("regression model:", m.__class__.__name__) m_rk = RegressionKriging(regression_model=m, n_closest_points=10) m_rk.fit(p_train, x_train, target_train) print("Regression Score: ", m_rk.regression_model.score(p_test, target_test)) print("RK score: ", m_rk.score(p_test, x_test, target_test))
1,368
28.76087
81
py
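Beyond score(), RegressionKriging as used above also exposes predict(), which adds the kriged residual field to the regression trend at new locations. A hedged sketch on random dummy data follows (shapes only, no real housing data; the coefficients are arbitrary).

import numpy as np
from sklearn.linear_model import LinearRegression
from pykrige.rk import RegressionKriging

rng = np.random.default_rng(0)
p = rng.random((200, 3))              # covariates used by the regression model
x = rng.random((200, 2)) * 10.0       # spatial coordinates
y = p @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.standard_normal(200)

m_rk = RegressionKriging(regression_model=LinearRegression(), n_closest_points=10)
m_rk.fit(p, x, y)
print(m_rk.predict(p[:5], x[:5]))     # trend prediction + kriged residual correction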
PyKrige
PyKrige-main/examples/10_classification_kriging2d.py
""" Classification kriging ---------------------- An example of classification kriging """ import sys from sklearn.datasets import fetch_california_housing from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import KBinsDiscretizer from sklearn.svm import SVC from pykrige.ck import ClassificationKriging svc_model = SVC(C=0.1, gamma="auto", probability=True) rf_model = RandomForestClassifier(n_estimators=100) lr_model = LogisticRegression(max_iter=10000) models = [svc_model, rf_model, lr_model] try: housing = fetch_california_housing() except PermissionError: # this dataset can occasionally fail to download on Windows sys.exit(0) # take the first 5000 as Kriging is memory intensive p = housing["data"][:5000, :-2] x = housing["data"][:5000, -2:] target = housing["target"][:5000] discretizer = KBinsDiscretizer(encode="ordinal") target = discretizer.fit_transform(target.reshape(-1, 1)) p_train, p_test, x_train, x_test, target_train, target_test = train_test_split( p, x, target, test_size=0.3, random_state=42 ) for m in models: print("=" * 40) print("classification model:", m.__class__.__name__) m_ck = ClassificationKriging(classification_model=m, n_closest_points=10) m_ck.fit(p_train, x_train, target_train) print( "Classification Score: ", m_ck.classification_model.score(p_test, target_test) ) print("CK score: ", m_ck.score(p_test, x_test, target_test))
1,566
29.72549
86
py
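As with the regression variant, `ClassificationKriging` provides a `predict` method; a minimal sketch reusing the example's variables to inspect a few predicted class bins:

# predicted class bins for the held-out points
pred = m_ck.predict(p_test, x_test)

# the targets were discretized into ordinal bins above, so the output is a bin index
print("first five predicted bins:", pred[:5].ravel())
print("first five actual bins:   ", target_test[:5].ravel())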
PyKrige
PyKrige-main/examples/01_universal.py
""" Universal Kriging Example ========================= In this example we apply a regional linear trend to the kriging system. """ import matplotlib.pyplot as plt import numpy as np from pykrige.uk import UniversalKriging data = np.array( [ [0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74], [3.3, 4.4, 1.47], [4.7, 3.8, 1.74], ] ) gridx = np.arange(0.0, 5.5, 0.5) gridy = np.arange(0.0, 5.5, 0.5) # Create the universal kriging object. Required inputs are the X-coordinates of # the data points, the Y-coordinates of the data points, and the Z-values of the # data points. Variogram is handled as in the ordinary kriging case. # drift_terms is a list of the drift terms to include; currently supported terms # are 'regional_linear', 'point_log', and 'external_Z'. Refer to # UniversalKriging.__doc__ for more information. UK = UniversalKriging( data[:, 0], data[:, 1], data[:, 2], variogram_model="linear", drift_terms=["regional_linear"], ) # Creates the kriged grid and the variance grid. Allows for kriging on a rectangular # grid of points, on a masked rectangular grid of points, or with arbitrary points. # (See UniversalKriging.__doc__ for more information.) z, ss = UK.execute("grid", gridx, gridy) plt.imshow(z) plt.show()
1,475
27.941176
84
py
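The universal kriging class also accepts the generic 'functional' drift described in `UniversalKriging.__doc__`. A minimal sketch, reusing the same toy `data` and grids as above, that swaps the regional linear drift for a callable drift that is linear in x:

from pykrige.uk import UniversalKriging

# a callable drift term: here simply a linear trend in the x-direction
UK_func = UniversalKriging(
    data[:, 0],
    data[:, 1],
    data[:, 2],
    variogram_model="linear",
    drift_terms=["functional"],
    functional_drift=[lambda x, y: x],
)
z_f, ss_f = UK_func.execute("grid", gridx, gridy)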
PyKrige
PyKrige-main/examples/08_krige_cv.py
""" Krige CV -------- Searching for optimal kriging parameters with cross validation """ import numpy as np from sklearn.model_selection import GridSearchCV from pykrige.rk import Krige # 2D Kring param opt param_dict = { "method": ["ordinary", "universal"], "variogram_model": ["linear", "power", "gaussian", "spherical"], # "nlags": [4, 6, 8], # "weight": [True, False] } estimator = GridSearchCV(Krige(), param_dict, verbose=True, return_train_score=True) # dummy data X = np.random.randint(0, 400, size=(100, 2)).astype(float) y = 5 * np.random.rand(100) # run the gridsearch estimator.fit(X=X, y=y) if hasattr(estimator, "best_score_"): print("best_score R² = {:.3f}".format(estimator.best_score_)) print("best_params = ", estimator.best_params_) print("\nCV results::") if hasattr(estimator, "cv_results_"): for key in [ "mean_test_score", "mean_train_score", "param_method", "param_variogram_model", ]: print(" - {} : {}".format(key, estimator.cv_results_[key])) # 3D Kring param opt param_dict3d = { "method": ["ordinary3d", "universal3d"], "variogram_model": ["linear", "power", "gaussian", "spherical"], # "nlags": [4, 6, 8], # "weight": [True, False] } estimator = GridSearchCV(Krige(), param_dict3d, verbose=True, return_train_score=True) # dummy data X3 = np.random.randint(0, 400, size=(100, 3)).astype(float) y = 5 * np.random.rand(100) # run the gridsearch estimator.fit(X=X3, y=y) if hasattr(estimator, "best_score_"): print("best_score R² = {:.3f}".format(estimator.best_score_)) print("best_params = ", estimator.best_params_) print("\nCV results::") if hasattr(estimator, "cv_results_"): for key in [ "mean_test_score", "mean_train_score", "param_method", "param_variogram_model", ]: print(" - {} : {}".format(key, estimator.cv_results_[key]))
1,951
23.708861
86
py
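Because `Krige` is a scikit-learn compatible estimator, the same parameter dictionary also works with a randomized search when the grid gets large. A sketch under the same dummy-data setup; the number of sampled candidates is an arbitrary choice:

from sklearn.model_selection import RandomizedSearchCV

rs = RandomizedSearchCV(
    Krige(),
    param_distributions=param_dict,
    n_iter=5,  # arbitrary number of sampled parameter combinations
    verbose=True,
    return_train_score=True,
)
rs.fit(X=X, y=y)
print("best_params = ", rs.best_params_)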
PyKrige
PyKrige-main/examples/02_kriging3D.py
""" Three-Dimensional Kriging Example ================================= """ import numpy as np from matplotlib import pyplot as plt from pykrige.ok3d import OrdinaryKriging3D from pykrige.uk3d import UniversalKriging3D data = np.array( [ [0.1, 0.1, 0.3, 0.9], [0.2, 0.1, 0.4, 0.8], [0.1, 0.3, 0.1, 0.9], [0.5, 0.4, 0.4, 0.5], [0.3, 0.3, 0.2, 0.7], ] ) gridx = np.arange(0.0, 0.6, 0.05) gridy = np.arange(0.0, 0.6, 0.01) gridz = np.arange(0.0, 0.6, 0.1) # Create the 3D ordinary kriging object and solves for the three-dimension kriged # volume and variance. Refer to OrdinaryKriging3D.__doc__ for more information. ok3d = OrdinaryKriging3D( data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear" ) k3d1, ss3d = ok3d.execute("grid", gridx, gridy, gridz) # Create the 3D universal kriging object and solves for the three-dimension kriged # volume and variance. Refer to UniversalKriging3D.__doc__ for more information. uk3d = UniversalKriging3D( data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear", drift_terms=["regional_linear"], ) k3d2, ss3d = uk3d.execute("grid", gridx, gridy, gridz) # To use the generic 'specified' drift term, the user must provide the drift values # at each data point and at every grid point. The following example is equivalent to # using a linear drift in all three spatial dimensions. Refer to # UniversalKriging3D.__doc__ for more information. zg, yg, xg = np.meshgrid(gridz, gridy, gridx, indexing="ij") uk3d = UniversalKriging3D( data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear", drift_terms=["specified"], specified_drift=[data[:, 0], data[:, 1], data[:, 2]], ) k3d3, ss3d = uk3d.execute( "grid", gridx, gridy, gridz, specified_drift_arrays=[xg, yg, zg] ) # To use the generic 'functional' drift term, the user must provide a callable # function that takes only the spatial dimensions as arguments. The following example # is equivalent to using a linear drift only in the x-direction. Refer to # UniversalKriging3D.__doc__ for more information. func = lambda x, y, z: x uk3d = UniversalKriging3D( data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear", drift_terms=["functional"], functional_drift=[func], ) k3d4, ss3d = uk3d.execute("grid", gridx, gridy, gridz) # Note that the use of the 'specified' and 'functional' generic drift capabilities is # essentially identical in the two-dimensional universal kriging class (except for a # difference in the number of spatial coordinates for the passed drift functions). # See UniversalKriging.__doc__ for more information. fig, (ax1, ax2, ax3, ax4) = plt.subplots(4) ax1.imshow(k3d1[:, :, 0], origin="lower") ax1.set_title("ordinary kriging") ax2.imshow(k3d2[:, :, 0], origin="lower") ax2.set_title("regional lin. drift") ax3.imshow(k3d3[:, :, 0], origin="lower") ax3.set_title("specified drift") ax4.imshow(k3d4[:, :, 0], origin="lower") ax4.set_title("functional drift") plt.tight_layout() plt.show()
3,514
32.47619
85
py
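The 3D classes support the same 'points' execution style as their 2D counterparts. A minimal sketch reusing `ok3d` from the example above; the query coordinates are illustrative values inside the toy domain:

import numpy as np

# kriging at two arbitrary (x, y, z) locations
xq = np.array([0.15, 0.35])
yq = np.array([0.20, 0.40])
zq = np.array([0.25, 0.30])

k_pts, ss_pts = ok3d.execute("points", xq, yq, zq)
print(k_pts, ss_pts)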
PyKrige
PyKrige-main/examples/05_kriging_1D.py
""" 1D Kriging ========== An example of 1D kriging with PyKrige """ import matplotlib.pyplot as plt import numpy as np from pykrige import OrdinaryKriging plt.style.use("ggplot") # fmt: off # Data taken from X, y = np.array([ [-5.01, 1.06], [-4.90, 0.92], [-4.82, 0.35], [-4.69, 0.49], [-4.56, 0.52], [-4.52, 0.12], [-4.39, 0.47], [-4.32,-0.19], [-4.19, 0.08], [-4.11,-0.19], [-4.00,-0.03], [-3.89,-0.03], [-3.78,-0.05], [-3.67, 0.10], [-3.59, 0.44], [-3.50, 0.66], [-3.39,-0.12], [-3.28, 0.45], [-3.20, 0.14], [-3.07,-0.28], [-3.01,-0.46], [-2.90,-0.32], [-2.77,-1.58], [-2.69,-1.44], [-2.60,-1.51], [-2.49,-1.50], [-2.41,-2.04], [-2.28,-1.57], [-2.19,-1.25], [-2.10,-1.50], [-2.00,-1.42], [-1.91,-1.10], [-1.80,-0.58], [-1.67,-1.08], [-1.61,-0.79], [-1.50,-1.00], [-1.37,-0.04], [-1.30,-0.54], [-1.19,-0.15], [-1.06,-0.18], [-0.98,-0.25], [-0.87,-1.20], [-0.78,-0.49], [-0.68,-0.83], [-0.57,-0.15], [-0.50, 0.00], [-0.38,-1.10], [-0.29,-0.32], [-0.18,-0.60], [-0.09,-0.49], [0.03 ,-0.50], [0.09 ,-0.02], [0.20 ,-0.47], [0.31 ,-0.11], [0.41 ,-0.28], [0.53 , 0.40], [0.61 , 0.11], [0.70 , 0.32], [0.94 , 0.42], [1.02 , 0.57], [1.13 , 0.82], [1.24 , 1.18], [1.30 , 0.86], [1.43 , 1.11], [1.50 , 0.74], [1.63 , 0.75], [1.74 , 1.15], [1.80 , 0.76], [1.93 , 0.68], [2.03 , 0.03], [2.12 , 0.31], [2.23 ,-0.14], [2.31 ,-0.88], [2.40 ,-1.25], [2.50 ,-1.62], [2.63 ,-1.37], [2.72 ,-0.99], [2.80 ,-1.92], [2.83 ,-1.94], [2.91 ,-1.32], [3.00 ,-1.69], [3.13 ,-1.84], [3.21 ,-2.05], [3.30 ,-1.69], [3.41 ,-0.53], [3.52 ,-0.55], [3.63 ,-0.92], [3.72 ,-0.76], [3.80 ,-0.41], [3.91 , 0.12], [4.04 , 0.25], [4.13 , 0.16], [4.24 , 0.26], [4.32 , 0.62], [4.44 , 1.69], [4.52 , 1.11], [4.65 , 0.36], [4.74 , 0.79], [4.84 , 0.87], [4.93 , 1.01], [5.02 , 0.55] ]).T # fmt: on X_pred = np.linspace(-6, 6, 200) # pykrige doesn't support 1D data for now, only 2D or 3D # adapting the 1D input to 2D uk = OrdinaryKriging(X, np.zeros(X.shape), y, variogram_model="gaussian") y_pred, y_std = uk.execute("grid", X_pred, np.array([0.0])) y_pred = np.squeeze(y_pred) y_std = np.squeeze(y_std) fig, ax = plt.subplots(1, 1, figsize=(10, 4)) ax.scatter(X, y, s=40, label="Input data") ax.plot(X_pred, y_pred, label="Predicted values") ax.fill_between( X_pred, y_pred - 3 * y_std, y_pred + 3 * y_std, alpha=0.3, label="Confidence interval", ) ax.legend(loc=9) ax.set_xlabel("x") ax.set_ylabel("y") ax.set_xlim(-6, 6) ax.set_ylim(-2.8, 3.5) plt.show()
2,626
36
79
py
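Since PyKrige currently works in 2D/3D, the 1D trick used in the example above (padding a dummy zero y-coordinate) can be wrapped in a small helper. A sketch of such a hypothetical convenience function built on the same `OrdinaryKriging` call; the helper name and signature are illustrative only:

import numpy as np
from pykrige import OrdinaryKriging

def krige_1d(x, y, x_pred, variogram_model="gaussian"):
    """Hypothetical helper: 1D ordinary kriging via a dummy zero y-coordinate."""
    ok = OrdinaryKriging(x, np.zeros_like(x), y, variogram_model=variogram_model)
    y_pred, y_std = ok.execute("grid", x_pred, np.array([0.0]))
    return np.squeeze(y_pred), np.squeeze(y_std)

# usage with the example's data:
# y_hat, y_err = krige_1d(X, y, X_pred)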
PyKrige
PyKrige-main/examples/04_krige_geometric.py
""" Geometric example ================= A small example script showing the usage of the 'geographic' coordinates type for ordinary kriging on a sphere. """ import numpy as np from matplotlib import pyplot as plt from pykrige.ok import OrdinaryKriging # Make this example reproducible: np.random.seed(89239413) # Generate random data following a uniform spatial distribution # of nodes and a uniform distribution of values in the interval # [2.0, 5.5]: N = 7 lon = 360.0 * np.random.random(N) lat = 180.0 / np.pi * np.arcsin(2 * np.random.random(N) - 1) z = 3.5 * np.random.rand(N) + 2.0 # Generate a regular grid with 60° longitude and 30° latitude steps: grid_lon = np.linspace(0.0, 360.0, 7) grid_lat = np.linspace(-90.0, 90.0, 7) # Create ordinary kriging object: OK = OrdinaryKriging( lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False, coordinates_type="geographic", ) # Execute on grid: z1, ss1 = OK.execute("grid", grid_lon, grid_lat) # Create ordinary kriging object ignoring curvature: OK = OrdinaryKriging( lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False ) # Execute on grid: z2, ss2 = OK.execute("grid", grid_lon, grid_lat) # Print data at equator (last longitude index will show periodicity): print("Original data:") print("Longitude:", lon.astype(int)) print("Latitude: ", lat.astype(int)) print("z: ", np.array_str(z, precision=2)) print("\nKrige at 60° latitude:\n======================") print("Longitude:", grid_lon) print("Value: ", np.array_str(z1[5, :], precision=2)) print("Sigma²: ", np.array_str(ss1[5, :], precision=2)) print("\nIgnoring curvature:\n=====================") print("Value: ", np.array_str(z2[5, :], precision=2)) print("Sigma²: ", np.array_str(ss2[5, :], precision=2)) # We can see that the data point at longitude 122, latitude 50 correctly # dominates the kriged results, since it is the closest node in spherical # distance metric, as longitude differences scale with cos(latitude). # When kriging using longitude / latitude linearly, the value for grid points # with longitude values further away as longitude is now incorrectly # weighted equally as latitude. fig, (ax1, ax2) = plt.subplots(1, 2) ax1.imshow(z1, extent=[0, 360, -90, 90], origin="lower") ax1.set_title("geo-coordinates") ax2.imshow(z2, extent=[0, 360, -90, 90], origin="lower") ax2.set_title("non geo-coordinates") plt.show()
2,636
31.555556
79
py
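The geographic coordinate type also works with the 'points' execution style, which makes it easy to krige at a handful of stations directly. A short sketch, assuming an `OrdinaryKriging` object constructed with coordinates_type="geographic" as in the first part of the example; the station coordinates are illustrative:

import numpy as np

# a few illustrative (lon, lat) query locations in degrees
lon_q = np.array([10.0, 120.0, 250.0])
lat_q = np.array([45.0, 50.0, -30.0])

zq, ssq = OK.execute("points", lon_q, lat_q)
print(np.array_str(zq, precision=2))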
PyKrige
PyKrige-main/examples/03_gstools_covmodel.py
""" GSTools Interface ================= Example how to use the PyKrige routines with a GSTools CovModel. """ import gstools as gs import numpy as np from matplotlib import pyplot as plt from pykrige.ok import OrdinaryKriging # conditioning data data = np.array( [ [0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74], [3.3, 4.4, 1.47], [4.7, 3.8, 1.74], ] ) # grid definition for output field gridx = np.arange(0.0, 5.5, 0.1) gridy = np.arange(0.0, 6.5, 0.1) # a GSTools based covariance model cov_model = gs.Gaussian(dim=2, len_scale=4, anis=0.2, angles=-0.5, var=0.5, nugget=0.1) # ordinary kriging with pykrige OK1 = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], cov_model) z1, ss1 = OK1.execute("grid", gridx, gridy) plt.imshow(z1, origin="lower") plt.show()
844
23.852941
87
py
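Any other 2D GSTools covariance model can be passed to the kriging classes in the same way. A minimal sketch swapping in an exponential model, reusing the example's `data` and grids; the parameter values are arbitrary:

import gstools as gs
from pykrige.ok import OrdinaryKriging

# an alternative GSTools model with arbitrary illustrative parameters
exp_model = gs.Exponential(dim=2, len_scale=2.0, var=0.4, nugget=0.05)

OK2 = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], exp_model)
z2, ss2 = OK2.execute("grid", gridx, gridy)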
PyKrige
PyKrige-main/src/pykrige/ok.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Contains class OrdinaryKriging, which provides easy access to 2D Ordinary Kriging. References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. Copyright (c) 2015-2020, PyKrige Developers """ import warnings import numpy as np import scipy.linalg from scipy.spatial.distance import cdist from . import core, variogram_models from .compat_gstools import validate_gstools from .core import ( P_INV, _adjust_for_anisotropy, _find_statistics, _initialize_variogram_model, _make_variogram_parameter_list, ) class OrdinaryKriging: r"""Convenience class for easy access to 2D Ordinary Kriging. Parameters ---------- x : array_like X-coordinates of data points. y : array_like Y-coordinates of data points. z : array-like Values at data points. variogram_model : str or GSTools CovModel, optional Specifies which variogram model to use; may be one of the following: linear, power, gaussian, spherical, exponential, hole-effect. Default is linear variogram model. To utilize a custom variogram model, specify 'custom'; you must also provide variogram_parameters and variogram_function. Note that the hole-effect model is only technically correct for one-dimensional problems. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional Parameters that define the specified variogram model. If not provided, parameters will be automatically calculated using a "soft" L1 norm minimization scheme. For variogram model parameters provided in a dict, the required dict keys vary according to the specified variogram model: :: # linear {'slope': slope, 'nugget': nugget} # power {'scale': scale, 'exponent': exponent, 'nugget': nugget} # gaussian, spherical, exponential and hole-effect: {'sill': s, 'range': r, 'nugget': n} # OR {'psill': p, 'range': r, 'nugget': n} Note that either the full sill or the partial sill (psill = sill - nugget) can be specified in the dict. For variogram model parameters provided in a list, the entries must be as follows: :: # linear [slope, nugget] # power [scale, exponent, nugget] # gaussian, spherical, exponential and hole-effect: [sill, range, nugget] Note that the full sill (NOT the partial sill) must be specified in the list format. For a custom variogram model, the parameters are required, as custom variogram models will not automatically be fit to the data. Furthermore, the parameters must be specified in list format, in the order in which they are used in the callable function (see variogram_function for more information). The code does not check that the provided list contains the appropriate number of parameters for the custom variogram model, so an incorrect parameter list in such a case will probably trigger an esoteric exception someplace deep in the code. NOTE that, while the list format expects the full sill, the code itself works internally with the partial sill. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. The function must take only two arguments: first, a list of parameters for the variogram model; second, the distances at which to calculate the variogram model. 
The list provided in variogram_parameters will be passed to the function as the first argument. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : bool, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. The routine is currently hard-coded such that the weights are calculated from a logistic function, so weights at small lags are ~1 and weights at the longest lags are ~0; the center of the logistic weighting is hard-coded to be at 70% of the distance from the shortest lag to the largest lag. Setting this parameter to True indicates that weights will be applied. Default is False. (Kitanidis suggests that the values at smaller lags are more important in fitting a variogram model, so the option is provided to enable such weighting.) anisotropy_scaling : float, optional Scalar stretching value to take into account anisotropy. Default is 1 (effectively no stretching). Scaling is applied in the y-direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle, if anisotropy_angle is not 0). This parameter has no effect if coordinate_types is set to 'geographic'. anisotropy_angle : float, optional CCW angle (in degrees) by which to rotate coordinate system in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. This parameter has no effect if coordinate_types is set to 'geographic'. verbose : bool, optional Enables program text output to monitor kriging process. Default is False (off). enable_plotting : bool, optional Enables plotting to display variogram. Default is False (off). enable_statistics : bool, optional Default is False coordinates_type : str, optional One of 'euclidean' or 'geographic'. Determines if the x and y coordinates are interpreted as on a plane ('euclidean') or as coordinates on a sphere ('geographic'). In case of geographic coordinates, x is interpreted as longitude and y as latitude coordinates, both given in degree. Longitudes are expected in [0, 360] and latitudes in [-90, 90]. Default is 'euclidean'. exact_values : bool, optional If True, interpolation provides input values at input locations. If False, interpolation accounts for variance/nugget within input values at input locations and does not behave as an exact-interpolator [2]. Note that this only has an effect if there is variance/nugget present within the input data since it is interpreted as measurement error. If the nugget is zero, the kriged field will behave as an exact interpolator. pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. Default: False pseudo_inv_type : :class:`str`, optional Here you can select the algorithm to compute the pseudo-inverse matrix: * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values Default: `"pinv"` References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. 
""" eps = 1.0e-10 # Cutoff for comparison to zero variogram_dict = { "linear": variogram_models.linear_variogram_model, "power": variogram_models.power_variogram_model, "gaussian": variogram_models.gaussian_variogram_model, "spherical": variogram_models.spherical_variogram_model, "exponential": variogram_models.exponential_variogram_model, "hole-effect": variogram_models.hole_effect_variogram_model, } def __init__( self, x, y, z, variogram_model="linear", variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0, anisotropy_angle=0.0, verbose=False, enable_plotting=False, enable_statistics=False, coordinates_type="euclidean", exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", ): # config the pseudo inverse self.pseudo_inv = bool(pseudo_inv) self.pseudo_inv_type = str(pseudo_inv_type) if self.pseudo_inv_type not in P_INV: raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type)) # set up variogram model and parameters... self.variogram_model = variogram_model self.model = None if not isinstance(exact_values, bool): raise ValueError("exact_values has to be boolean True or False") self.exact_values = exact_values self.coordinates_type = coordinates_type # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim == 3: raise ValueError("GSTools: model dim is not 1 or 2") # check if coordinate types match if self.model.latlon and (self.coordinates_type == "euclidean"): raise ValueError( "GSTools: latlon models require geographic coordinates" ) self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling = self.model.pykrige_anis anisotropy_angle = self.model.pykrige_angle if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." ) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] # Code assumes 1D input arrays of floats. Ensures that any extraneous # dimensions don't get in the way. Copies are created to avoid any # problems with referencing the original passed arguments. # Also, values are forced to be float... in the future, might be worth # developing complex-number kriging (useful for vector field kriging) self.X_ORIG = np.atleast_1d( np.squeeze(np.array(x, copy=True, dtype=np.float64)) ) self.Y_ORIG = np.atleast_1d( np.squeeze(np.array(y, copy=True, dtype=np.float64)) ) self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64))) self.verbose = verbose self.enable_plotting = enable_plotting if self.enable_plotting and self.verbose: print("Plotting Enabled\n") # adjust for anisotropy... only implemented for euclidean (rectangular) # coordinates, as anisotropy is ambiguous for geographic coordinates... 
if self.coordinates_type == "euclidean": self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0 self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0 self.anisotropy_scaling = anisotropy_scaling self.anisotropy_angle = anisotropy_angle if self.verbose: print("Adjusting data for anisotropy...") self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ).T elif self.coordinates_type == "geographic": # Leave everything as is in geographic case. # May be open to discussion? if anisotropy_scaling != 1.0: warnings.warn( "Anisotropy is not compatible with geographic " "coordinates. Ignoring user set anisotropy.", UserWarning, ) self.XCENTER = 0.0 self.YCENTER = 0.0 self.anisotropy_scaling = 1.0 self.anisotropy_angle = 0.0 self.X_ADJUSTED = self.X_ORIG self.Y_ADJUSTED = self.Y_ORIG else: raise ValueError( "Only 'euclidean' and 'geographic' are valid " "values for coordinates-keyword." ) if self.verbose: print("Initializing variogram model...") vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, self.coordinates_type, ) if self.verbose: print("Coordinates type: '%s'" % self.coordinates_type, "\n") if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") if enable_statistics: self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_function, self.variogram_model_parameters, self.coordinates_type, self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") else: self.delta, self.sigma, self.epsilon, self.Q1, self.Q2, self.cR = [None] * 6 def update_variogram_model( self, variogram_model, variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0, anisotropy_angle=0.0, ): """Allows user to update variogram type and/or variogram model parameters. Parameters __________ variogram_model : str or GSTools CovModel May be any of the variogram models listed above. May also be 'custom', in which case variogram_parameters and variogram_function must be specified. 
You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional List or dict of variogram model parameters, as explained above. If not provided, a best fit model will be calculated as described above. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. See above for more information. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : boolean, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating the variogram model. See above for more information. True indicates that weights will be applied. Default is False. anisotropy_scaling : float, optional Scalar stretching value to take into account anisotropy. Default is 1 (effectively no stretching). Scaling is applied in the y-direction. anisotropy_angle : float, optional CCW angle (in degrees) by which to rotate coordinate system in order to take into account anisotropy. Default is 0 (no rotation). """ # set up variogram model and parameters... self.variogram_model = variogram_model self.model = None # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim == 3: raise ValueError("GSTools: model dim is not 1 or 2") # check if coordinate types match if self.model.latlon and (self.coordinates_type == "euclidean"): raise ValueError( "GSTools: latlon models require geographic coordinates" ) self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling = self.model.pykrige_anis anisotropy_angle = self.model.pykrige_angle if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." ) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] if ( anisotropy_scaling != self.anisotropy_scaling or anisotropy_angle != self.anisotropy_angle ): if self.coordinates_type == "euclidean": if self.verbose: print("Adjusting data for anisotropy...") self.anisotropy_scaling = anisotropy_scaling self.anisotropy_angle = anisotropy_angle self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ).T elif self.coordinates_type == "geographic": if anisotropy_scaling != 1.0: warnings.warn( "Anisotropy is not compatible with geographic" " coordinates. Ignoring user set anisotropy.", UserWarning, ) self.anisotropy_scaling = 1.0 self.anisotropy_angle = 0.0 self.X_ADJUSTED = self.X_ORIG self.Y_ADJUSTED = self.Y_ORIG if self.verbose: print("Updating variogram mode...") # See note above about the 'use_psill' kwarg... 
vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, self.coordinates_type, ) if self.verbose: print("Coordinates type: '%s'" % self.coordinates_type, "\n") if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_function, self.variogram_model_parameters, self.coordinates_type, self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") def display_variogram_model(self): """Displays variogram model with the actual binned data.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.lags, self.semivariance, "r*") ax.plot( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), "k-", ) plt.show() def get_variogram_points(self): """Returns both the lags and the variogram function evaluated at each of them. The evaluation of the variogram function and the lags are produced internally. This method is convenient when the user wants to access to the lags and the resulting variogram (according to the model provided) for further analysis. Returns ------- (tuple) tuple containing: lags (array) - the lags at which the variogram was evaluated variogram (array) - the variogram function evaluated at the lags """ return ( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), ) def switch_verbose(self): """Allows user to switch code talk-back on/off. Takes no arguments.""" self.verbose = not self.verbose def switch_plotting(self): """Allows user to switch plot display on/off. Takes no arguments.""" self.enable_plotting = not self.enable_plotting def get_epsilon_residuals(self): """Returns the epsilon residuals for the variogram fit.""" return self.epsilon def plot_epsilon_residuals(self): """Plots the epsilon residuals for the variogram fit.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*") ax.axhline(y=0.0) plt.show() def get_statistics(self): """Returns the Q1, Q2, and cR statistics for the variogram fit (in that order). No arguments. 
""" return self.Q1, self.Q2, self.cR def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR) def _get_kriging_matrix(self, n): """Assembles the kriging matrix.""" if self.coordinates_type == "euclidean": xy = np.concatenate( (self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1 ) d = cdist(xy, xy, "euclidean") elif self.coordinates_type == "geographic": d = core.great_circle_distance( self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.X_ADJUSTED, self.Y_ADJUSTED, ) a = np.zeros((n + 1, n + 1)) a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.0) a[n, :] = 1.0 a[:, n] = 1.0 a[n, n] = 0.0 return a def _exec_vector(self, a, bd, mask): """Solves the kriging system as a vectorized operation. This method can take a lot of memory for large grids and/or large datasets.""" npt = bd.shape[0] n = self.X_ADJUSTED.shape[0] zero_index = None zero_value = False # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) b = np.zeros((npt, n + 1, 1)) b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], zero_index[1], 0] = 0.0 b[:, n, 0] = 1.0 if (~mask).any(): mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n + 1, axis=1) b = np.ma.array(b, mask=mask_b) x = np.dot(a_inv, b.reshape((npt, n + 1)).T).reshape((1, n + 1, npt)).T zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1) sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1) return zvalues, sigmasq def _exec_loop(self, a, bd_all, mask): """Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.""" npt = bd_all.shape[0] n = self.X_ADJUSTED.shape[0] zvalues = np.zeros(npt) sigmasq = np.zeros(npt) # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) for j in np.nonzero(~mask)[ 0 ]: # Note that this is the same thing as range(npt) if mask is not defined, bd = bd_all[j] # otherwise it takes the non-masked elements. if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_index = None zero_value = False b = np.zeros((n + 1, 1)) b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = np.dot(a_inv, b) zvalues[j] = np.sum(x[:n, 0] * self.Z) sigmasq[j] = np.sum(x[:, 0] * -b[:, 0]) return zvalues, sigmasq def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx): """Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.""" import scipy.linalg.lapack npt = bd_all.shape[0] n = bd_idx.shape[1] zvalues = np.zeros(npt) sigmasq = np.zeros(npt) for i in np.nonzero(~mask)[ 0 ]: # Note that this is the same thing as range(npt) if mask is not defined, b_selector = bd_idx[i] # otherwise it takes the non-masked elements. 
bd = bd_all[i] a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1]))) a = a_all[a_selector[:, None], a_selector] if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_index = None zero_value = False b = np.zeros((n + 1, 1)) b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = scipy.linalg.solve(a, b) zvalues[i] = x[:n, 0].dot(self.Z[b_selector]) sigmasq[i] = -x[:, 0].dot(b[:, 0]) return zvalues, sigmasq def execute( self, style, xpoints, ypoints, mask=None, backend="vectorized", n_closest_points=None, ): """Calculates a kriged grid and the associated variance. Parameters ---------- style : str Specifies how to treat input kriging points. Specifying 'grid' treats xpoints and ypoints as two arrays of x and y coordinates that define a rectangular grid. Specifying 'points' treats xpoints and ypoints as two arrays that provide coordinate pairs at which to solve the kriging system. Specifying 'masked' treats xpoints and ypoints as two arrays of x and y coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints : array_like, shape (N,) or (N, 1) If style is specific as 'grid' or 'masked', x-coordinates of MxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. ypoints : array_like, shape (M,) or (M, 1) If style is specified as 'grid' or 'masked', y-coordinates of MxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints and ypoints must have the same dimensions (i.e., M = N). mask : bool, array_like, shape (M, N), optional Specifies the points in the rectangular grid defined by xpoints and ypoints that are to be excluded in the kriging calculations. Must be provided if style is specified as 'masked'. False indicates that the point should not be masked, so the kriging system will be solved at the point. True indicates that the point should be masked, so the kriging system should will not be solved at the point. backend : str, optional Specifies which approach to use in kriging. Specifying 'vectorized' will solve the entire kriging problem at once in a vectorized operation. This approach is faster but also can consume a significant amount of memory for large grids and/or large datasets. Specifying 'loop' will loop through each point at which the kriging system is to be solved. This approach is slower but also less memory-intensive. Specifying 'C' will utilize a loop in Cython. Default is 'vectorized'. n_closest_points : int, optional For kriging with a moving window, specifies the number of nearby points to use in the calculation. This can speed up the calculation for large datasets, but should be used with caution. As Kitanidis notes, kriging with a moving window can produce unexpected oddities if the variogram model is not carefully chosen. Returns ------- zvalues : ndarray, shape (M, N) or (N, 1) Z-values of specified grid or at the specified set of points. If style was specified as 'masked', zvalues will be a numpy masked array. sigmasq : ndarray, shape (M, N) or (N, 1) Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. 
""" if self.verbose: print("Executing Ordinary Kriging...\n") if style != "grid" and style != "masked" and style != "points": raise ValueError("style argument must be 'grid', 'points', or 'masked'") if n_closest_points is not None and n_closest_points <= 1: # If this is not checked, nondescriptive errors emerge # later in the code. raise ValueError("n_closest_points has to be at least two!") xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True))) ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True))) n = self.X_ADJUSTED.shape[0] nx = xpts.size ny = ypts.size a = self._get_kriging_matrix(n) if style in ["grid", "masked"]: if style == "masked": if mask is None: raise IOError( "Must specify boolean masking array when style is 'masked'." ) if mask.shape[0] != ny or mask.shape[1] != nx: if mask.shape[0] == nx and mask.shape[1] == ny: mask = mask.T else: raise ValueError( "Mask dimensions do not match specified grid dimensions." ) mask = mask.flatten() npt = ny * nx grid_x, grid_y = np.meshgrid(xpts, ypts) xpts = grid_x.flatten() ypts = grid_y.flatten() elif style == "points": if xpts.size != ypts.size: raise ValueError( "xpoints and ypoints must have " "same dimensions when treated as " "listing discrete points." ) npt = nx else: raise ValueError("style argument must be 'grid', 'points', or 'masked'") if self.coordinates_type == "euclidean": xpts, ypts = _adjust_for_anisotropy( np.vstack((xpts, ypts)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ).T # Prepare for cdist: xy_data = np.concatenate( (self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1 ) xy_points = np.concatenate( (xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1 ) elif self.coordinates_type == "geographic": # In spherical coordinates, we do not correct for anisotropy. # Also, we don't use scipy.spatial.cdist, so we do not have to # format the input data accordingly. pass if style != "masked": mask = np.zeros(npt, dtype="bool") c_pars = None if backend == "C": try: from .lib.cok import _c_exec_loop, _c_exec_loop_moving_window except ImportError: print( "Warning: failed to load Cython extensions.\n" " See https://github.com/GeoStat-Framework/PyKrige/issues/8 \n" " Falling back to a pure python backend..." ) backend = "loop" except: raise RuntimeError("Unknown error in trying to load Cython extension.") c_pars = { key: getattr(self, key) for key in [ "Z", "eps", "variogram_model_parameters", "variogram_function", "exact_values", "pseudo_inv", "pseudo_inv_type", ] } if n_closest_points is not None: if self.coordinates_type == "geographic": # To make use of the KDTree, we have to convert the # spherical coordinates into three dimensional Euclidean # coordinates, since the standard KDTree cannot handle # the periodicity. 
# Do the conversion just for the step involving the KDTree: lon_d = self.X_ADJUSTED[:, np.newaxis] * np.pi / 180.0 lat_d = self.Y_ADJUSTED[:, np.newaxis] * np.pi / 180.0 xy_data = np.concatenate( ( np.cos(lon_d) * np.cos(lat_d), np.sin(lon_d) * np.cos(lat_d), np.sin(lat_d), ), axis=1, ) lon_p = xpts[:, np.newaxis] * np.pi / 180.0 lat_p = ypts[:, np.newaxis] * np.pi / 180.0 xy_points = np.concatenate( ( np.cos(lon_p) * np.cos(lat_p), np.sin(lon_p) * np.cos(lat_p), np.sin(lat_p), ), axis=1, ) from scipy.spatial import cKDTree tree = cKDTree(xy_data) bd, bd_idx = tree.query(xy_points, k=n_closest_points, eps=0.0) if self.coordinates_type == "geographic": # Between the nearest neighbours from Euclidean search, # calculate the great circle distance using the standard method: x_points = np.tile(xpts[:, np.newaxis], (1, n_closest_points)) y_points = np.tile(ypts[:, np.newaxis], (1, n_closest_points)) bd = core.great_circle_distance( x_points, y_points, self.X_ADJUSTED[bd_idx], self.Y_ADJUSTED[bd_idx] ) if backend == "loop": zvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx) elif backend == "C": zvalues, sigmasq = _c_exec_loop_moving_window( a, bd, mask.astype("int8"), bd_idx.astype(int), self.X_ADJUSTED.shape[0], c_pars, ) else: raise ValueError( "Specified backend {} for a moving window " "is not supported.".format(backend) ) else: if self.coordinates_type == "euclidean": bd = cdist(xy_points, xy_data, "euclidean") elif self.coordinates_type == "geographic": bd = core.great_circle_distance( xpts[:, np.newaxis], ypts[:, np.newaxis], self.X_ADJUSTED, self.Y_ADJUSTED, ) if backend == "vectorized": zvalues, sigmasq = self._exec_vector(a, bd, mask) elif backend == "loop": zvalues, sigmasq = self._exec_loop(a, bd, mask) elif backend == "C": zvalues, sigmasq = _c_exec_loop( a, bd, mask.astype("int8"), self.X_ADJUSTED.shape[0], c_pars ) else: raise ValueError( "Specified backend {} is not supported for " "2D ordinary kriging.".format(backend) ) if style == "masked": zvalues = np.ma.array(zvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ["masked", "grid"]: zvalues = zvalues.reshape((ny, nx)) sigmasq = sigmasq.reshape((ny, nx)) return zvalues, sigmasq
42,554
40.679726
88
py
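The OrdinaryKriging class above documents a moving-window mode (`n_closest_points`) and a pseudo-inverse solver. A minimal usage sketch under assumed toy data; the 'loop' backend is used because the vectorized backend does not support the moving window:

import numpy as np
from pykrige.ok import OrdinaryKriging

# toy data purely for illustration
rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 50)
y = rng.uniform(0, 10, 50)
z = np.sin(x) + np.cos(y)

ok = OrdinaryKriging(
    x,
    y,
    z,
    variogram_model="spherical",
    pseudo_inv=True,  # more robust solve, averages redundant points
    pseudo_inv_type="pinv",
)
gridx = np.linspace(0, 10, 40)
gridy = np.linspace(0, 10, 40)

# moving-window kriging: only the 10 nearest data points per estimate
zgrid, ss = ok.execute("grid", gridx, gridy, backend="loop", n_closest_points=10)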
PyKrige
PyKrige-main/src/pykrige/compat_gstools.py
# pylint: disable= invalid-name, unused-import
"""For GSTools compatibility."""

# gstools
try:
    import gstools as gs

    GSTOOLS_INSTALLED = True
    GSTOOLS_VERSION = list(map(int, gs.__version__.split(".")[:2]))
except ImportError:
    gs = None
    GSTOOLS_INSTALLED = False
    GSTOOLS_VERSION = None


class GSToolsException(Exception):
    """Exception for GSTools."""


def validate_gstools(model):
    """Validate presence of GSTools."""
    if not GSTOOLS_INSTALLED:
        raise GSToolsException(
            "GSTools needs to be installed in order to use their CovModel class."
        )
    if not isinstance(model, gs.CovModel):
        raise GSToolsException(
            "GSTools: given variogram model is not a CovModel instance."
        )
    if GSTOOLS_VERSION < [1, 3]:
        raise GSToolsException("GSTools: need at least GSTools v1.3.")
    if model.latlon and GSTOOLS_VERSION < [1, 4]:
        raise GSToolsException(
            "GSTools: latlon models in PyKrige are only supported from GSTools v1.4."
        )
1,062
27.72973
85
py
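A small sketch of how the validation helper above is typically used before handing a covariance model to the kriging classes; the Gaussian model parameters are arbitrary:

import gstools as gs
from pykrige.compat_gstools import GSToolsException, validate_gstools

model = gs.Gaussian(dim=2, len_scale=3.0)
try:
    validate_gstools(model)  # raises GSToolsException if GSTools is missing or too old
except GSToolsException as err:
    print("cannot use this model with PyKrige:", err)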
PyKrige
PyKrige-main/src/pykrige/uk.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Contains class UniversalKriging, provides greater control over 2D kriging by utilizing drift terms. References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. Copyright (c) 2015-2020, PyKrige Developers """ import warnings import numpy as np import scipy.linalg from scipy.spatial.distance import cdist from . import core, variogram_models from .compat_gstools import validate_gstools from .core import ( P_INV, _adjust_for_anisotropy, _find_statistics, _initialize_variogram_model, _make_variogram_parameter_list, ) class UniversalKriging: """Provides greater control over 2D kriging by utilizing drift terms. Parameters ---------- x : array_like X-coordinates of data points. y : array_like Y-coordinates of data points. z : array_like Values at data points. variogram_model: str or GSTools CovModel, optional Specified which variogram model to use; may be one of the following: linear, power, gaussian, spherical, exponential, hole-effect. Default is linear variogram model. To utilize a custom variogram model, specify 'custom'; you must also provide variogram_parameters and variogram_function. Note that the hole-effect model is only technically correct for one-dimensional problems. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional Parameters that define the specified variogram model. If not provided, parameters will be automatically calculated using a "soft" L1 norm minimization scheme. For variogram model parameters provided in a dict, the required dict keys vary according to the specified variogram model: :: # linear {'slope': slope, 'nugget': nugget} # power {'scale': scale, 'exponent': exponent, 'nugget': nugget} # gaussian, spherical, exponential and hole-effect: {'sill': s, 'range': r, 'nugget': n} # OR {'psill': p, 'range': r, 'nugget': n} Note that either the full sill or the partial sill (psill = sill - nugget) can be specified in the dict. For variogram model parameters provided in a list, the entries must be as follows: :: # linear [slope, nugget] # power [scale, exponent, nugget] # gaussian, spherical, exponential and hole-effect: [sill, range, nugget] Note that the full sill (NOT the partial sill) must be specified in the list format. For a custom variogram model, the parameters are required, as custom variogram models will not automatically be fit to the data. Furthermore, the parameters must be specified in list format, in the order in which they are used in the callable function (see variogram_function for more information). The code does not check that the provided list contains the appropriate number of parameters for the custom variogram model, so an incorrect parameter list in such a case will probably trigger an esoteric exception someplace deep in the code. NOTE that, while the list format expects the full sill, the code itself works internally with the partial sill. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. The function must take only two arguments: first, a list of parameters for the variogram model; second, the distances at which to calculate the variogram model. 
The list provided in variogram_parameters will be passed to the function as the first argument. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : bool, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. The routine is currently hard-coded such that the weights are calculated from a logistic function, so weights at small lags are ~1 and weights at the longest lags are ~0; the center of the logistic weighting is hard-coded to be at 70% of the distance from the shortest lag to the largest lag. Setting this parameter to True indicates that weights will be applied. Default is False. (Kitanidis suggests that the values at smaller lags are more important in fitting a variogram model, so the option is provided to enable such weighting.) anisotropy_scaling : float, optional Scalar stretching value to take into account anisotropy. Default is 1 (effectively no stretching). Scaling is applied in the y-direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle, if anisotropy_angle is not 0). anisotropy_angle : float, optional CCW angle (in degrees) by which to rotate coordinate system in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. drift_terms : list of strings, optional List of drift terms to include in universal kriging. Supported drift terms are currently 'regional_linear', 'point_log', 'external_Z', 'specified', and 'functional'. point_drift : array_like, optional Array-like object that contains the coordinates and strengths of the point-logarithmic drift terms. Array shape must be (N, 3), where N is the number of point drift terms. First column (index 0) must contain x-coordinates, second column (index 1) must contain y-coordinates, and third column (index 2) must contain the strengths of each point term. Strengths are relative, so only the relation of the values to each other matters. Note that the code will appropriately deal with point-logarithmic terms that are at the same coordinates as an evaluation point or data point, but Python will still kick out a warning message that an ln(0) has been encountered. If the problem involves anisotropy, the well coordinates will be adjusted and the drift values will be calculated in the adjusted data frame. external_drift : array_like, optional Gridded data used for the external Z scalar drift term. Must be shape (M, N), where M is in the y-direction and N is in the x-direction. Grid spacing does not need to be constant. If grid spacing is not constant, must specify the grid cell sizes. If the problem involves anisotropy, the external drift values are extracted based on the pre-adjusted coordinates (i.e., the original coordinate system). external_drift_x : array_like, optional X-coordinates for gridded external Z-scalar data. Must be shape (M,) or (M, 1), where M is the number of grid cells in the x-direction. The coordinate is treated as the center of the cell. external_drift_y : array_like, optional Y-coordinates for gridded external Z-scalar data. Must be shape (N,) or (N, 1), where N is the number of grid cells in the y-direction. The coordinate is treated as the center of the cell. specified_drift : list of array-like objects, optional List of arrays that contain the drift values at data points. The arrays must be shape (N,) or (N, 1), where N is the number of data points. Any number of specified-drift terms may be used. 
functional_drift : list of callable objects, optional List of callable functions that will be used to evaluate drift terms. The function must be a function of only the two spatial coordinates and must return a single value for each coordinate pair. It must be set up to be called with only two arguments, first an array of x values and second an array of y values. If the problem involves anisotropy, the drift values are calculated in the adjusted data frame. verbose : bool, optional Enables program text output to monitor kriging process. Default is False (off). enable_plotting : boolean, optional Enables plotting to display variogram. Default is False (off). exact_values : bool, optional If True, interpolation provides input values at input locations. If False, interpolation accounts for variance/nugget within input values at input locations and does not behave as an exact-interpolator [2]. Note that this only has an effect if there is variance/nugget present within the input data since it is interpreted as measurement error. If the nugget is zero, the kriged field will behave as an exact interpolator. pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. Default: False pseudo_inv_type : :class:`str`, optional Here you can select the algorithm to compute the pseudo-inverse matrix: * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values Default: `"pinv"` References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. """ UNBIAS = True # This can be changed to remove the unbiasedness condition # Really for testing purposes only... eps = 1.0e-10 # Cutoff for comparison to zero variogram_dict = { "linear": variogram_models.linear_variogram_model, "power": variogram_models.power_variogram_model, "gaussian": variogram_models.gaussian_variogram_model, "spherical": variogram_models.spherical_variogram_model, "exponential": variogram_models.exponential_variogram_model, "hole-effect": variogram_models.hole_effect_variogram_model, } def __init__( self, x, y, z, variogram_model="linear", variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0, anisotropy_angle=0.0, drift_terms=None, point_drift=None, external_drift=None, external_drift_x=None, external_drift_y=None, specified_drift=None, functional_drift=None, verbose=False, enable_plotting=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", ): # config the pseudo inverse self.pseudo_inv = bool(pseudo_inv) self.pseudo_inv_type = str(pseudo_inv_type) if self.pseudo_inv_type not in P_INV: raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type)) # Deal with mutable default argument if drift_terms is None: drift_terms = [] if specified_drift is None: specified_drift = [] if functional_drift is None: functional_drift = [] # set up variogram model and parameters... 
self.variogram_model = variogram_model self.model = None if not isinstance(exact_values, bool): raise ValueError("exact_values has to be boolean True or False") self.exact_values = exact_values # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim == 3: raise ValueError("GSTools: model dim is not 1 or 2") if self.model.latlon: raise ValueError( "GSTools: latlon models not supported for universal kriging" ) self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling = self.model.pykrige_anis anisotropy_angle = self.model.pykrige_angle if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." ) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] # Code assumes 1D input arrays. Ensures that any extraneous dimensions # don't get in the way. Copies are created to avoid any problems with # referencing the original passed arguments. self.X_ORIG = np.atleast_1d( np.squeeze(np.array(x, copy=True, dtype=np.float64)) ) self.Y_ORIG = np.atleast_1d( np.squeeze(np.array(y, copy=True, dtype=np.float64)) ) self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64))) self.verbose = verbose self.enable_plotting = enable_plotting if self.enable_plotting and self.verbose: print("Plotting Enabled\n") # adjust for anisotropy... self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0 self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0 self.anisotropy_scaling = anisotropy_scaling self.anisotropy_angle = anisotropy_angle if self.verbose: print("Adjusting data for anisotropy...") self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ).T if self.verbose: print("Initializing variogram model...") # see comment in ok.py about 'use_psill' kwarg... vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) # TODO extend geographic capabilities to UK... 
if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") if self.verbose: print("Initializing drift terms...") # Note that the regional linear drift values will be based # on the adjusted coordinate system, Really, it doesn't actually # matter which coordinate system is used here. if "regional_linear" in drift_terms: self.regional_linear_drift = True if self.verbose: print("Implementing regional linear drift.") else: self.regional_linear_drift = False # External Z scalars are extracted using the original # (unadjusted) coordinates. if "external_Z" in drift_terms: if external_drift is None: raise ValueError("Must specify external Z drift terms.") if external_drift_x is None or external_drift_y is None: raise ValueError("Must specify coordinates of external Z drift terms.") self.external_Z_drift = True if ( external_drift.shape[0] != external_drift_y.shape[0] or external_drift.shape[1] != external_drift_x.shape[0] ): if ( external_drift.shape[0] == external_drift_x.shape[0] and external_drift.shape[1] == external_drift_y.shape[0] ): self.external_Z_array = np.array(external_drift.T) else: raise ValueError( "External drift dimensions do not match " "provided x- and y-coordinate dimensions." ) else: self.external_Z_array = np.array(external_drift) self.external_Z_array_x = np.array(external_drift_x).flatten() self.external_Z_array_y = np.array(external_drift_y).flatten() self.z_scalars = self._calculate_data_point_zscalars( self.X_ORIG, self.Y_ORIG ) if self.verbose: print("Implementing external Z drift.") else: self.external_Z_drift = False # Well coordinates are rotated into adjusted coordinate frame. if "point_log" in drift_terms: if point_drift is None: raise ValueError( "Must specify location(s) and strength(s) of point drift terms." 
) self.point_log_drift = True point_log = np.atleast_2d(np.squeeze(np.array(point_drift, copy=True))) self.point_log_array = np.zeros(point_log.shape) self.point_log_array[:, 2] = point_log[:, 2] self.point_log_array[:, :2] = _adjust_for_anisotropy( np.vstack((point_log[:, 0], point_log[:, 1])).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ) if self.verbose: print( "Implementing external point-logarithmic drift; " "number of points =", self.point_log_array.shape[0], "\n", ) else: self.point_log_drift = False if "specified" in drift_terms: if type(specified_drift) is not list: raise TypeError( "Arrays for specified drift terms must be " "encapsulated in a list." ) if len(specified_drift) == 0: raise ValueError( "Must provide at least one drift-value array " "when using the 'specified' drift capability." ) self.specified_drift = True self.specified_drift_data_arrays = [] for term in specified_drift: specified = np.squeeze(np.array(term, copy=True)) if specified.size != self.X_ORIG.size: raise ValueError( "Must specify the drift values for each " "data point when using the 'specified' " "drift capability." ) self.specified_drift_data_arrays.append(specified) else: self.specified_drift = False # The provided callable functions will be evaluated using # the adjusted coordinates. if "functional" in drift_terms: if type(functional_drift) is not list: raise TypeError( "Callables for functional drift terms must " "be encapsulated in a list." ) if len(functional_drift) == 0: raise ValueError( "Must provide at least one callable object " "when using the 'functional' drift capability." ) self.functional_drift = True self.functional_drift_terms = functional_drift else: self.functional_drift = False def _calculate_data_point_zscalars(self, x, y, type_="array"): """Determines the Z-scalar values at the specified coordinates for use when setting up the kriging matrix. Uses bilinear interpolation. Currently, the Z scalar values are extracted from the input Z grid exactly at the specified coordinates. This means that if the Z grid resolution is finer than the resolution of the desired kriged grid, there is no averaging of the scalar values to return an average Z value for that cell in the kriged grid. Rather, the exact Z value right at the coordinate is used.""" if type_ == "scalar": nx = 1 ny = 1 z_scalars = None else: if x.ndim == 1: nx = x.shape[0] ny = 1 else: ny = x.shape[0] nx = x.shape[1] z_scalars = np.zeros(x.shape) for m in range(ny): for n in range(nx): if type_ == "scalar": xn = x yn = y else: if x.ndim == 1: xn = x[n] yn = y[n] else: xn = x[m, n] yn = y[m, n] if ( xn > np.amax(self.external_Z_array_x) or xn < np.amin(self.external_Z_array_x) or yn > np.amax(self.external_Z_array_y) or yn < np.amin(self.external_Z_array_y) ): raise ValueError( "External drift array does not cover " "specified kriging domain." 
) # bilinear interpolation external_x2_index = np.amin(np.where(self.external_Z_array_x >= xn)[0]) external_x1_index = np.amax(np.where(self.external_Z_array_x <= xn)[0]) external_y2_index = np.amin(np.where(self.external_Z_array_y >= yn)[0]) external_y1_index = np.amax(np.where(self.external_Z_array_y <= yn)[0]) if external_y1_index == external_y2_index: if external_x1_index == external_x2_index: z = self.external_Z_array[external_y1_index, external_x1_index] else: z = ( self.external_Z_array[external_y1_index, external_x1_index] * (self.external_Z_array_x[external_x2_index] - xn) + self.external_Z_array[ external_y2_index, external_x2_index ] * (xn - self.external_Z_array_x[external_x1_index]) ) / ( self.external_Z_array_x[external_x2_index] - self.external_Z_array_x[external_x1_index] ) elif external_x1_index == external_x2_index: if external_y1_index == external_y2_index: z = self.external_Z_array[external_y1_index, external_x1_index] else: z = ( self.external_Z_array[external_y1_index, external_x1_index] * (self.external_Z_array_y[external_y2_index] - yn) + self.external_Z_array[ external_y2_index, external_x2_index ] * (yn - self.external_Z_array_y[external_y1_index]) ) / ( self.external_Z_array_y[external_y2_index] - self.external_Z_array_y[external_y1_index] ) else: z = ( self.external_Z_array[external_y1_index, external_x1_index] * (self.external_Z_array_x[external_x2_index] - xn) * (self.external_Z_array_y[external_y2_index] - yn) + self.external_Z_array[external_y1_index, external_x2_index] * (xn - self.external_Z_array_x[external_x1_index]) * (self.external_Z_array_y[external_y2_index] - yn) + self.external_Z_array[external_y2_index, external_x1_index] * (self.external_Z_array_x[external_x2_index] - xn) * (yn - self.external_Z_array_y[external_y1_index]) + self.external_Z_array[external_y2_index, external_x2_index] * (xn - self.external_Z_array_x[external_x1_index]) * (yn - self.external_Z_array_y[external_y1_index]) ) / ( ( self.external_Z_array_x[external_x2_index] - self.external_Z_array_x[external_x1_index] ) * ( self.external_Z_array_y[external_y2_index] - self.external_Z_array_y[external_y1_index] ) ) if type_ == "scalar": z_scalars = z else: if z_scalars.ndim == 1: z_scalars[n] = z else: z_scalars[m, n] = z return z_scalars def update_variogram_model( self, variogram_model, variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0, anisotropy_angle=0.0, ): """Allows user to update variogram type and/or variogram model parameters. Parameters ---------- variogram_model : str or GSTools CovModel May be any of the variogram models listed above. May also be 'custom', in which case variogram_parameters and variogram_function must be specified. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional List or dict of variogram model parameters, as explained above. If not provided, a best fit model will be calculated as described above. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. See above for more information. nlags : int, optional Number of averaging bins for the semivariogram. Defualt is 6. weight : boolean, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating the variogram model. See above for more information. True indicates that weights will be applied. Default is False. 
        anisotropy_scaling : float, optional
            Scalar stretching value to take into account anisotropy.
            Default is 1 (effectively no stretching).
            Scaling is applied in the y-direction.
        anisotropy_angle : float, optional
            CCW angle (in degrees) by which to rotate coordinate system in
            order to take into account anisotropy. Default is 0 (no rotation).
        """

        # set up variogram model and parameters...
        self.variogram_model = variogram_model
        self.model = None
        # check if a GSTools covariance model is given
        if hasattr(self.variogram_model, "pykrige_kwargs"):
            # save the model in the class
            self.model = self.variogram_model
            validate_gstools(self.model)
            if self.model.field_dim == 3:
                raise ValueError("GSTools: model dim is not 1 or 2")
            if self.model.latlon:
                raise ValueError(
                    "GSTools: latlon models not supported for universal kriging"
                )
            self.variogram_model = "custom"
            variogram_function = self.model.pykrige_vario
            variogram_parameters = []
            anisotropy_scaling = self.model.pykrige_anis
            anisotropy_angle = self.model.pykrige_angle
        if (
            self.variogram_model not in self.variogram_dict.keys()
            and self.variogram_model != "custom"
        ):
            raise ValueError(
                "Specified variogram model '%s' is not supported." % variogram_model
            )
        elif self.variogram_model == "custom":
            if variogram_function is None or not callable(variogram_function):
                raise ValueError(
                    "Must specify callable function for custom variogram model."
                )
            else:
                self.variogram_function = variogram_function
        else:
            self.variogram_function = self.variogram_dict[self.variogram_model]

        if (
            anisotropy_scaling != self.anisotropy_scaling
            or anisotropy_angle != self.anisotropy_angle
        ):
            if self.verbose:
                print("Adjusting data for anisotropy...")
            self.anisotropy_scaling = anisotropy_scaling
            self.anisotropy_angle = anisotropy_angle
            self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy(
                np.vstack((self.X_ORIG, self.Y_ORIG)).T,
                [self.XCENTER, self.YCENTER],
                [self.anisotropy_scaling],
                [self.anisotropy_angle],
            ).T

        if self.verbose:
            print("Updating variogram model...")

        # See note above about the 'use_psill' kwarg...
vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T, self.Z, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") def display_variogram_model(self): """Displays variogram model with the actual binned data.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.lags, self.semivariance, "r*") ax.plot( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), "k-", ) plt.show() def get_variogram_points(self): """Returns both the lags and the variogram function evaluated at each of them. The evaluation of the variogram function and the lags are produced internally. This method is convenient when the user wants to access to the lags and the resulting variogram (according to the model provided) for further analysis. Returns ------- (tuple) tuple containing: lags (array) - the lags at which the variogram was evaluated variogram (array) - the variogram function evaluated at the lags """ return ( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), ) def switch_verbose(self): """Allows user to switch code talk-back on/off. Takes no arguments.""" self.verbose = not self.verbose def switch_plotting(self): """Allows user to switch plot display on/off. Takes no arguments.""" self.enable_plotting = not self.enable_plotting def get_epsilon_residuals(self): """Returns the epsilon residuals for the variogram fit.""" return self.epsilon def plot_epsilon_residuals(self): """Plots the epsilon residuals for the variogram fit.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*") ax.axhline(y=0.0) plt.show() def get_statistics(self): """Returns the Q1, Q2, and cR statistics for the variogram fit (in that order). No arguments. 
""" return self.Q1, self.Q2, self.cR def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR) def _get_kriging_matrix(self, n, n_withdrifts): """Assembles the kriging matrix.""" xy = np.concatenate( (self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1 ) d = cdist(xy, xy, "euclidean") if self.UNBIAS: a = np.zeros((n_withdrifts + 1, n_withdrifts + 1)) else: a = np.zeros((n_withdrifts, n_withdrifts)) a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.0) i = n if self.regional_linear_drift: a[:n, i] = self.X_ADJUSTED a[i, :n] = self.X_ADJUSTED i += 1 a[:n, i] = self.Y_ADJUSTED a[i, :n] = self.Y_ADJUSTED i += 1 if self.point_log_drift: for well_no in range(self.point_log_array.shape[0]): log_dist = np.log( np.sqrt( (self.X_ADJUSTED - self.point_log_array[well_no, 0]) ** 2 + (self.Y_ADJUSTED - self.point_log_array[well_no, 1]) ** 2 ) ) if np.any(np.isinf(log_dist)): log_dist[np.isinf(log_dist)] = -100.0 a[:n, i] = -self.point_log_array[well_no, 2] * log_dist a[i, :n] = -self.point_log_array[well_no, 2] * log_dist i += 1 if self.external_Z_drift: a[:n, i] = self.z_scalars a[i, :n] = self.z_scalars i += 1 if self.specified_drift: for arr in self.specified_drift_data_arrays: a[:n, i] = arr a[i, :n] = arr i += 1 if self.functional_drift: for func in self.functional_drift_terms: a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED) a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED) i += 1 if i != n_withdrifts: warnings.warn( "Error in creating kriging matrix. Kriging may fail.", RuntimeWarning ) if self.UNBIAS: a[n_withdrifts, :n] = 1.0 a[:n, n_withdrifts] = 1.0 a[n : n_withdrifts + 1, n : n_withdrifts + 1] = 0.0 return a def _exec_vector(self, a, bd, xy, xy_orig, mask, n_withdrifts, spec_drift_grids): """Solves the kriging system as a vectorized operation. This method can take a lot of memory for large grids and/or large datasets.""" npt = bd.shape[0] n = self.X_ADJUSTED.shape[0] zero_index = None zero_value = False # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) if self.UNBIAS: b = np.zeros((npt, n_withdrifts + 1, 1)) else: b = np.zeros((npt, n_withdrifts, 1)) b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], zero_index[1], 0] = 0.0 i = n if self.regional_linear_drift: b[:, i, 0] = xy[:, 0] i += 1 b[:, i, 0] = xy[:, 1] i += 1 if self.point_log_drift: for well_no in range(self.point_log_array.shape[0]): log_dist = np.log( np.sqrt( (xy[:, 0] - self.point_log_array[well_no, 0]) ** 2 + (xy[:, 1] - self.point_log_array[well_no, 1]) ** 2 ) ) if np.any(np.isinf(log_dist)): log_dist[np.isinf(log_dist)] = -100.0 b[:, i, 0] = -self.point_log_array[well_no, 2] * log_dist i += 1 if self.external_Z_drift: b[:, i, 0] = self._calculate_data_point_zscalars( xy_orig[:, 0], xy_orig[:, 1] ) i += 1 if self.specified_drift: for spec_vals in spec_drift_grids: b[:, i, 0] = spec_vals.flatten() i += 1 if self.functional_drift: for func in self.functional_drift_terms: b[:, i, 0] = func(xy[:, 0], xy[:, 1]) i += 1 if i != n_withdrifts: warnings.warn( "Error in setting up kriging system. 
Kriging may fail.", RuntimeWarning, ) if self.UNBIAS: b[:, n_withdrifts, 0] = 1.0 if (~mask).any(): mask_b = np.repeat( mask[:, np.newaxis, np.newaxis], n_withdrifts + 1, axis=1 ) b = np.ma.array(b, mask=mask_b) if self.UNBIAS: x = ( np.dot(a_inv, b.reshape((npt, n_withdrifts + 1)).T) .reshape((1, n_withdrifts + 1, npt)) .T ) else: x = ( np.dot(a_inv, b.reshape((npt, n_withdrifts)).T) .reshape((1, n_withdrifts, npt)) .T ) zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1) sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1) return zvalues, sigmasq def _exec_loop(self, a, bd_all, xy, xy_orig, mask, n_withdrifts, spec_drift_grids): """Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.""" npt = bd_all.shape[0] n = self.X_ADJUSTED.shape[0] zvalues = np.zeros(npt) sigmasq = np.zeros(npt) # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) for j in np.nonzero(~mask)[ 0 ]: # Note that this is the same thing as range(npt) if mask is not defined, bd = bd_all[j] # otherwise it takes the non-masked elements. if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_index = None zero_value = False if self.UNBIAS: b = np.zeros((n_withdrifts + 1, 1)) else: b = np.zeros((n_withdrifts, 1)) b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], 0] = 0.0 i = n if self.regional_linear_drift: b[i, 0] = xy[j, 0] i += 1 b[i, 0] = xy[j, 1] i += 1 if self.point_log_drift: for well_no in range(self.point_log_array.shape[0]): log_dist = np.log( np.sqrt( (xy[j, 0] - self.point_log_array[well_no, 0]) ** 2 + (xy[j, 1] - self.point_log_array[well_no, 1]) ** 2 ) ) if np.any(np.isinf(log_dist)): log_dist[np.isinf(log_dist)] = -100.0 b[i, 0] = -self.point_log_array[well_no, 2] * log_dist i += 1 if self.external_Z_drift: b[i, 0] = self._calculate_data_point_zscalars( xy_orig[j, 0], xy_orig[j, 1], type_="scalar" ) i += 1 if self.specified_drift: for spec_vals in spec_drift_grids: b[i, 0] = spec_vals.flatten()[i] i += 1 if self.functional_drift: for func in self.functional_drift_terms: b[i, 0] = func(xy[j, 0], xy[j, 1]) i += 1 if i != n_withdrifts: warnings.warn( "Error in setting up kriging system. Kriging may fail.", RuntimeWarning, ) if self.UNBIAS: b[n_withdrifts, 0] = 1.0 x = np.dot(a_inv, b) zvalues[j] = np.sum(x[:n, 0] * self.Z) sigmasq[j] = np.sum(x[:, 0] * -b[:, 0]) return zvalues, sigmasq def execute( self, style, xpoints, ypoints, mask=None, backend="vectorized", specified_drift_arrays=None, ): """Calculates a kriged grid and the associated variance. Includes drift terms. Parameters ---------- style : str Specifies how to treat input kriging points. Specifying 'grid' treats xpoints and ypoints as two arrays of x and y coordinates that define a rectangular grid. Specifying 'points' treats xpoints and ypoints as two arrays that provide coordinate pairs at which to solve the kriging system. Specifying 'masked' treats xpoints and ypoints as two arrays of x and y coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints : array_like, shape (N,) or (N, 1) If style is specific as 'grid' or 'masked', x-coordinates of MxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. 
ypoints : array-like, shape (M,) or (M, 1) If style is specified as 'grid' or 'masked', y-coordinates of MxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints and ypoints must have the same dimensions (i.e., M = N). mask : boolean array, shape (M, N), optional Specifies the points in the rectangular grid defined by xpoints and ypoints that are to be excluded in the kriging calculations. Must be provided if style is specified as 'masked'. False indicates that the point should not be masked, so the kriging system will be solved at the point. True indicates that the point should be masked, so the kriging system should will not be solved at the point. backend : str, optional Specifies which approach to use in kriging. Specifying 'vectorized' will solve the entire kriging problem at once in a vectorized operation. This approach is faster but also can consume a significant amount of memory for large grids and/or large datasets. Specifying 'loop' will loop through each point at which the kriging system is to be solved. This approach is slower but also less memory-intensive. Default is 'vectorized'. Note that Cython backend is not supported for UK. specified_drift_arrays : list of array-like objects, optional Specifies the drift values at the points at which the kriging system is to be evaluated. Required if 'specified' drift provided in the list of drift terms when instantiating the UniversalKriging class. Must be a list of arrays in the same order as the list provided when instantiating the kriging object. Array(s) must be the same dimension as the specified grid or have the same number of points as the specified points; i.e., the arrays either must be shape (M, N), where M is the number of y grid-points and N is the number of x grid-points, or shape (M, ) or (N, 1), where M is the number of points at which to evaluate the kriging system. Returns ------- zvalues : ndarray, shape (M, N) or (N, 1) Z-values of specified grid or at the specified set of points. If style was specified as 'masked', zvalues will be a numpy masked array. sigmasq : ndarray, shape (M, N) or (N, 1) Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. """ if self.verbose: print("Executing Universal Kriging...\n") if style != "grid" and style != "masked" and style != "points": raise ValueError("style argument must be 'grid', 'points', or 'masked'") n = self.X_ADJUSTED.shape[0] n_withdrifts = n xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True))) ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True))) nx = xpts.size ny = ypts.size if self.regional_linear_drift: n_withdrifts += 2 if self.point_log_drift: n_withdrifts += self.point_log_array.shape[0] if self.external_Z_drift: n_withdrifts += 1 if self.specified_drift: n_withdrifts += len(self.specified_drift_data_arrays) if self.functional_drift: n_withdrifts += len(self.functional_drift_terms) a = self._get_kriging_matrix(n, n_withdrifts) if style in ["grid", "masked"]: if style == "masked": if mask is None: raise IOError( "Must specify boolean masking array when style is 'masked'." ) if mask.shape[0] != ny or mask.shape[1] != nx: if mask.shape[0] == nx and mask.shape[1] == ny: mask = mask.T else: raise ValueError( "Mask dimensions do not match specified grid dimensions." 
) mask = mask.flatten() npt = ny * nx grid_x, grid_y = np.meshgrid(xpts, ypts) xpts = grid_x.flatten() ypts = grid_y.flatten() elif style == "points": if xpts.size != ypts.size: raise ValueError( "xpoints and ypoints must have same " "dimensions when treated as listing " "discrete points." ) npt = nx else: raise ValueError("style argument must be 'grid', 'points', or 'masked'") if specified_drift_arrays is None: specified_drift_arrays = [] spec_drift_grids = [] if self.specified_drift: if len(specified_drift_arrays) == 0: raise ValueError( "Must provide drift values for kriging points " "when using 'specified' drift capability." ) if type(specified_drift_arrays) is not list: raise TypeError( "Arrays for specified drift terms must be " "encapsulated in a list." ) for spec in specified_drift_arrays: if style in ["grid", "masked"]: if spec.ndim < 2: raise ValueError( "Dimensions of drift values array do " "not match specified grid dimensions." ) elif spec.shape[0] != ny or spec.shape[1] != nx: if spec.shape[0] == nx and spec.shape[1] == ny: spec_drift_grids.append(np.squeeze(spec.T)) else: raise ValueError( "Dimensions of drift values array " "do not match specified grid " "dimensions." ) else: spec_drift_grids.append(np.squeeze(spec)) elif style == "points": if spec.ndim != 1: raise ValueError( "Dimensions of drift values array do " "not match specified grid dimensions." ) elif spec.shape[0] != xpts.size: raise ValueError( "Number of supplied drift values in " "array do not match specified number " "of kriging points." ) else: spec_drift_grids.append(np.squeeze(spec)) if len(spec_drift_grids) != len(self.specified_drift_data_arrays): raise ValueError( "Inconsistent number of specified drift terms supplied." ) else: if len(specified_drift_arrays) != 0: warnings.warn( "Provided specified drift values, but " "'specified' drift was not initialized during " "instantiation of UniversalKriging class.", RuntimeWarning, ) xy_points_original = np.concatenate( (xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1 ) xpts, ypts = _adjust_for_anisotropy( np.vstack((xpts, ypts)).T, [self.XCENTER, self.YCENTER], [self.anisotropy_scaling], [self.anisotropy_angle], ).T xy_points = np.concatenate((xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1) xy_data = np.concatenate( (self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1 ) if style != "masked": mask = np.zeros(npt, dtype="bool") bd = cdist(xy_points, xy_data, "euclidean") if backend == "vectorized": zvalues, sigmasq = self._exec_vector( a, bd, xy_points, xy_points_original, mask, n_withdrifts, spec_drift_grids, ) elif backend == "loop": zvalues, sigmasq = self._exec_loop( a, bd, xy_points, xy_points_original, mask, n_withdrifts, spec_drift_grids, ) else: raise ValueError( "Specified backend {} is not supported " "for 2D universal kriging.".format(backend) ) if style == "masked": zvalues = np.ma.array(zvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ["masked", "grid"]: zvalues = zvalues.reshape((ny, nx)) sigmasq = sigmasq.reshape((ny, nx)) return zvalues, sigmasq
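# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not part of the original module).
# It exercises the universal-kriging workflow documented above with synthetic
# data and a hypothetical functional drift callable, and is guarded by
# __main__ so it never runs on import.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.uniform(0.0, 10.0, 50)
    y = rng.uniform(0.0, 10.0, 50)
    z = 0.3 * x + 0.1 * y + rng.normal(0.0, 0.05, 50)

    def linear_trend(xc, yc):
        # A functional drift term: called with arrays of x and y values and
        # returning one drift value per coordinate pair (see the
        # 'functional_drift' parameter description in the class docstring).
        return 0.3 * xc + 0.1 * yc

    uk = UniversalKriging(
        x,
        y,
        z,
        variogram_model="linear",
        drift_terms=["regional_linear", "functional"],
        functional_drift=[linear_trend],
    )
    gridx = np.linspace(0.0, 10.0, 20)
    gridy = np.linspace(0.0, 10.0, 25)
    zvalues, sigmasq = uk.execute("grid", gridx, gridy, backend="vectorized")
    # For 'grid' style, results are shaped (ny, nx)
    print(zvalues.shape, sigmasq.shape)  # expected: (25, 20) (25, 20)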
56,799
41.706767
87
py
PyKrige
PyKrige-main/src/pykrige/core.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Methods used by multiple classes. References ---------- [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. [2] T. Vincenty, Direct and Inverse Solutions of Geodesics on the Ellipsoid with Application of Nested Equations, Survey Review 23 (176), (Directorate of Overseas Survey, Kingston Road, Tolworth, Surrey 1975) Copyright (c) 2015-2020, PyKrige Developers """ import numpy as np import scipy.linalg as spl from scipy.optimize import least_squares from scipy.spatial.distance import cdist, pdist, squareform eps = 1.0e-10 # Cutoff for comparison to zero P_INV = {"pinv": spl.pinv, "pinvh": spl.pinvh} def great_circle_distance(lon1, lat1, lon2, lat2): """Calculate the great circle distance between one or multiple pairs of points given in spherical coordinates. Spherical coordinates are expected in degrees. Angle definition follows standard longitude/latitude definition. This uses the arctan version of the great-circle distance function (en.wikipedia.org/wiki/Great-circle_distance) for increased numerical stability. Parameters ---------- lon1: float scalar or numpy array Longitude coordinate(s) of the first element(s) of the point pair(s), given in degrees. lat1: float scalar or numpy array Latitude coordinate(s) of the first element(s) of the point pair(s), given in degrees. lon2: float scalar or numpy array Longitude coordinate(s) of the second element(s) of the point pair(s), given in degrees. lat2: float scalar or numpy array Latitude coordinate(s) of the second element(s) of the point pair(s), given in degrees. Calculation of distances follows numpy elementwise semantics, so if an array of length N is passed, all input parameters need to be arrays of length N or scalars. Returns ------- distance: float scalar or numpy array The great circle distance(s) (in degrees) between the given pair(s) of points. """ # Convert to radians: lat1 = np.array(lat1) * np.pi / 180.0 lat2 = np.array(lat2) * np.pi / 180.0 dlon = (lon1 - lon2) * np.pi / 180.0 # Evaluate trigonometric functions that need to be evaluated more # than once: c1 = np.cos(lat1) s1 = np.sin(lat1) c2 = np.cos(lat2) s2 = np.sin(lat2) cd = np.cos(dlon) # This uses the arctan version of the great-circle distance function # from en.wikipedia.org/wiki/Great-circle_distance for increased # numerical stability. # Formula can be obtained from [2] combining eqns. (14)-(16) # for spherical geometry (f=0). return ( 180.0 / np.pi * np.arctan2( np.sqrt((c2 * np.sin(dlon)) ** 2 + (c1 * s2 - s1 * c2 * cd) ** 2), s1 * s2 + c1 * c2 * cd, ) ) def euclid3_to_great_circle(euclid3_distance): """Convert euclidean distance between points on a unit sphere to the corresponding great circle distance. Parameters ---------- euclid3_distance: float scalar or numpy array The euclidean three-space distance(s) between points on a unit sphere, thus between [0,2]. Returns ------- great_circle_dist: float scalar or numpy array The corresponding great circle distance(s) between the points. """ # Eliminate some possible numerical errors: euclid3_distance[euclid3_distance > 2.0] = 2.0 return 180.0 - 360.0 / np.pi * np.arccos(0.5 * euclid3_distance) def _adjust_for_anisotropy(X, center, scaling, angle): """Adjusts data coordinates to take into account anisotropy. Can also be used to take into account data scaling. Angles are CCW about specified axes. 
Scaling is applied in rotated coordinate system. Parameters ---------- X : ndarray float array [n_samples, n_dim], the input array of coordinates center : ndarray float array [n_dim], the coordinate of centers scaling : ndarray float array [n_dim - 1], the scaling of last two dimensions angle : ndarray float array [2*n_dim - 3], the anisotropy angle (degrees) Returns ------- X_adj : ndarray float array [n_samples, n_dim], the X array adjusted for anisotropy. """ center = np.asarray(center)[None, :] angle = np.asarray(angle) * np.pi / 180 X -= center Ndim = X.shape[1] if Ndim == 1: raise NotImplementedError("Not implemnented yet?") elif Ndim == 2: stretch = np.array([[1, 0], [0, scaling[0]]]) rot_tot = np.array( [ [np.cos(-angle[0]), -np.sin(-angle[0])], [np.sin(-angle[0]), np.cos(-angle[0])], ] ) elif Ndim == 3: stretch = np.array( [[1.0, 0.0, 0.0], [0.0, scaling[0], 0.0], [0.0, 0.0, scaling[1]]] ) rotate_x = np.array( [ [1.0, 0.0, 0.0], [0.0, np.cos(-angle[0]), -np.sin(-angle[0])], [0.0, np.sin(-angle[0]), np.cos(-angle[0])], ] ) rotate_y = np.array( [ [np.cos(-angle[1]), 0.0, np.sin(-angle[1])], [0.0, 1.0, 0.0], [-np.sin(-angle[1]), 0.0, np.cos(-angle[1])], ] ) rotate_z = np.array( [ [np.cos(-angle[2]), -np.sin(-angle[2]), 0.0], [np.sin(-angle[2]), np.cos(-angle[2]), 0.0], [0.0, 0.0, 1.0], ] ) rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x)) else: raise ValueError( "Adjust for anisotropy function doesn't support ND spaces where N>3" ) X_adj = np.dot(stretch, np.dot(rot_tot, X.T)).T X_adj += center return X_adj def _make_variogram_parameter_list(variogram_model, variogram_model_parameters): """Converts the user input for the variogram model parameters into the format expected in the rest of the code. Makes a list of variogram model parameters in the expected order if the user has provided the model parameters. If not, returns None, which will ensure that the automatic variogram estimation routine is triggered. Parameters ---------- variogram_model : str specifies the variogram model type variogram_model_parameters : list, dict, or None parameters provided by the user, can also be None if the user did not specify the variogram model parameters; if None, this function returns None, that way the automatic variogram estimation routine will kick in down the road... Returns ------- parameter_list : list variogram model parameters stored in a list in the expected order; if variogram_model is 'custom', model parameters should already be encapsulated in a list, so the list is returned unaltered; if variogram_model_parameters was not specified by the user, None is returned; order for internal variogram models is as follows... linear - [slope, nugget] power - [scale, exponent, nugget] gaussian - [psill, range, nugget] spherical - [psill, range, nugget] exponential - [psill, range, nugget] hole-effect - [psill, range, nugget] """ if variogram_model_parameters is None: parameter_list = None elif type(variogram_model_parameters) is dict: if variogram_model in ["linear"]: if ( "slope" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): raise KeyError( "'linear' variogram model requires 'slope' " "and 'nugget' specified in variogram model " "parameter dictionary." 
) else: parameter_list = [ variogram_model_parameters["slope"], variogram_model_parameters["nugget"], ] elif variogram_model in ["power"]: if ( "scale" not in variogram_model_parameters.keys() or "exponent" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): raise KeyError( "'power' variogram model requires 'scale', " "'exponent', and 'nugget' specified in " "variogram model parameter dictionary." ) else: parameter_list = [ variogram_model_parameters["scale"], variogram_model_parameters["exponent"], variogram_model_parameters["nugget"], ] elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]: if ( "range" not in variogram_model_parameters.keys() or "nugget" not in variogram_model_parameters.keys() ): raise KeyError( "'%s' variogram model requires 'range', " "'nugget', and either 'sill' or 'psill' " "specified in variogram model parameter " "dictionary." % variogram_model ) else: if "sill" in variogram_model_parameters.keys(): parameter_list = [ variogram_model_parameters["sill"] - variogram_model_parameters["nugget"], variogram_model_parameters["range"], variogram_model_parameters["nugget"], ] elif "psill" in variogram_model_parameters.keys(): parameter_list = [ variogram_model_parameters["psill"], variogram_model_parameters["range"], variogram_model_parameters["nugget"], ] else: raise KeyError( "'%s' variogram model requires either " "'sill' or 'psill' specified in " "variogram model parameter " "dictionary." % variogram_model ) elif variogram_model in ["custom"]: raise TypeError( "For user-specified custom variogram model, " "parameters must be specified in a list, " "not a dict." ) else: raise ValueError( "Specified variogram model must be one of the " "following: 'linear', 'power', 'gaussian', " "'spherical', 'exponential', 'hole-effect', " "'custom'." ) elif type(variogram_model_parameters) is list: if variogram_model in ["linear"]: if len(variogram_model_parameters) != 2: raise ValueError( "Variogram model parameter list must have " "exactly two entries when variogram model " "set to 'linear'." ) parameter_list = variogram_model_parameters elif variogram_model in ["power"]: if len(variogram_model_parameters) != 3: raise ValueError( "Variogram model parameter list must have " "exactly three entries when variogram model " "set to 'power'." ) parameter_list = variogram_model_parameters elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]: if len(variogram_model_parameters) != 3: raise ValueError( "Variogram model parameter list must have " "exactly three entries when variogram model " "set to '%s'." % variogram_model ) parameter_list = [ variogram_model_parameters[0] - variogram_model_parameters[2], variogram_model_parameters[1], variogram_model_parameters[2], ] elif variogram_model in ["custom"]: parameter_list = variogram_model_parameters else: raise ValueError( "Specified variogram model must be one of the " "following: 'linear', 'power', 'gaussian', " "'spherical', 'exponential', 'hole-effect', " "'custom'." ) else: raise TypeError( "Variogram model parameters must be provided in either " "a list or a dict when they are explicitly specified." ) return parameter_list def _initialize_variogram_model( X, y, variogram_model, variogram_model_parameters, variogram_function, nlags, weight, coordinates_type, ): """Initializes the variogram model for kriging. If user does not specify parameters, calls automatic variogram estimation routine. 
Returns lags, semivariance, and variogram model parameters. Parameters ---------- X: ndarray float array [n_samples, n_dim], the input array of coordinates y: ndarray float array [n_samples], the input array of values to be kriged variogram_model: str user-specified variogram model to use variogram_model_parameters: list user-specified parameters for variogram model variogram_function: callable function that will be called to evaluate variogram model (only used if user does not specify variogram model parameters) nlags: int integer scalar, number of bins into which to group inter-point distances weight: bool boolean flag that indicates whether the semivariances at smaller lags should be weighted more heavily in the automatic variogram estimation coordinates_type: str type of coordinates in X array, can be 'euclidean' for standard rectangular coordinates or 'geographic' if the coordinates are lat/lon Returns ------- lags: ndarray float array [nlags], distance values for bins into which the semivariances were grouped semivariance: ndarray float array [nlags], averaged semivariance for each bin variogram_model_parameters: list parameters for the variogram model, either returned unaffected if the user specified them or returned from the automatic variogram estimation routine """ # distance calculation for rectangular coords now leverages # scipy.spatial.distance's pdist function, which gives pairwise distances # in a condensed distance vector (distance matrix flattened to a vector) # to calculate semivariances... if coordinates_type == "euclidean": d = pdist(X, metric="euclidean") g = 0.5 * pdist(y[:, None], metric="sqeuclidean") # geographic coordinates only accepted if the problem is 2D # assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat # old method of distance calculation is retained here... # could be improved in the future elif coordinates_type == "geographic": if X.shape[1] != 2: raise ValueError( "Geographic coordinate type only supported for 2D datasets." ) x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True) y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True) z1, z2 = np.meshgrid(y, y, sparse=True) d = great_circle_distance(x1, y1, x2, y2) g = 0.5 * (z1 - z2) ** 2.0 indices = np.indices(d.shape) d = d[(indices[0, :, :] > indices[1, :, :])] g = g[(indices[0, :, :] > indices[1, :, :])] else: raise ValueError( "Specified coordinate type '%s' is not supported." % coordinates_type ) # Equal-sized bins are now implemented. The upper limit on the bins # is appended to the list (instead of calculated as part of the # list comprehension) to avoid any numerical oddities # (specifically, say, ending up as 0.99999999999999 instead of 1.0). # Appending dmax + 0.001 ensures that the largest distance value # is included in the semivariogram calculation. dmax = np.amax(d) dmin = np.amin(d) dd = (dmax - dmin) / nlags bins = [dmin + n * dd for n in range(nlags)] dmax += 0.001 bins.append(dmax) # This old binning method was experimental and doesn't seem # to work too well. Bins were computed such that there are more # at shorter lags. This effectively weights smaller distances more # highly in determining the variogram. As Kitanidis points out, # the variogram fit to the data at smaller lag distances is more # important. However, the value at the largest lag probably ends up # being biased too high for the larger values and thereby throws off # automatic variogram calculation and confuses comparison of the # semivariogram with the variogram model. 
# # dmax = np.amax(d) # dmin = np.amin(d) # dd = dmax - dmin # bins = [dd*(0.5**n) + dmin for n in range(nlags, 1, -1)] # bins.insert(0, dmin) # bins.append(dmax) lags = np.zeros(nlags) semivariance = np.zeros(nlags) for n in range(nlags): # This 'if... else...' statement ensures that there are data # in the bin so that numpy can actually find the mean. If we # don't test this first, then Python kicks out an annoying warning # message when there is an empty bin and we try to calculate the mean. if d[(d >= bins[n]) & (d < bins[n + 1])].size > 0: lags[n] = np.mean(d[(d >= bins[n]) & (d < bins[n + 1])]) semivariance[n] = np.mean(g[(d >= bins[n]) & (d < bins[n + 1])]) else: lags[n] = np.nan semivariance[n] = np.nan lags = lags[~np.isnan(semivariance)] semivariance = semivariance[~np.isnan(semivariance)] # a few tests the make sure that, if the variogram_model_parameters # are supplied, they have been supplied as expected... # if variogram_model_parameters was not defined, then estimate the variogram if variogram_model_parameters is not None: if variogram_model == "linear" and len(variogram_model_parameters) != 2: raise ValueError( "Exactly two parameters required for linear variogram model." ) elif ( variogram_model in ["power", "spherical", "exponential", "gaussian", "hole-effect"] and len(variogram_model_parameters) != 3 ): raise ValueError( "Exactly three parameters required for " "%s variogram model" % variogram_model ) else: if variogram_model == "custom": raise ValueError( "Variogram parameters must be specified when " "implementing custom variogram model." ) else: variogram_model_parameters = _calculate_variogram_model( lags, semivariance, variogram_model, variogram_function, weight ) return lags, semivariance, variogram_model_parameters def _variogram_residuals(params, x, y, variogram_function, weight): """Function used in variogram model estimation. Returns residuals between calculated variogram and actual data (lags/semivariance). Called by _calculate_variogram_model. Parameters ---------- params: list or 1D array parameters for calculating the model variogram x: ndarray lags (distances) at which to evaluate the model variogram y: ndarray experimental semivariances at the specified lags variogram_function: callable the actual funtion that evaluates the model variogram weight: bool flag for implementing the crude weighting routine, used in order to fit smaller lags better Returns ------- resid: 1d array residuals, dimension same as y """ # this crude weighting routine can be used to better fit the model # variogram to the experimental variogram at smaller lags... # the weights are calculated from a logistic function, so weights at small # lags are ~1 and weights at the longest lags are ~0; # the center of the logistic weighting is hard-coded to be at 70% of the # distance from the shortest lag to the largest lag if weight: drange = np.amax(x) - np.amin(x) k = 2.1972 / (0.1 * drange) x0 = 0.7 * drange + np.amin(x) weights = 1.0 / (1.0 + np.exp(-k * (x0 - x))) weights /= np.sum(weights) resid = (variogram_function(params, x) - y) * weights else: resid = variogram_function(params, x) - y return resid def _calculate_variogram_model( lags, semivariance, variogram_model, variogram_function, weight ): """Function that fits a variogram model when parameters are not specified. Returns variogram model parameters that minimize the RMSE between the specified variogram function and the actual calculated variogram points. 
Parameters ---------- lags: 1d array binned lags/distances to use for variogram model parameter estimation semivariance: 1d array binned/averaged experimental semivariances to use for variogram model parameter estimation variogram_model: str/unicode specified variogram model to use for parameter estimation variogram_function: callable the actual funtion that evaluates the model variogram weight: bool flag for implementing the crude weighting routine, used in order to fit smaller lags better this is passed on to the residual calculation cfunction, where weighting is actually applied... Returns ------- res: list list of estimated variogram model parameters NOTE that the estimation routine works in terms of the partial sill (psill = sill - nugget) -- setting bounds such that psill > 0 ensures that the sill will always be greater than the nugget... """ if variogram_model == "linear": x0 = [ (np.amax(semivariance) - np.amin(semivariance)) / (np.amax(lags) - np.amin(lags)), np.amin(semivariance), ] bnds = ([0.0, 0.0], [np.inf, np.amax(semivariance)]) elif variogram_model == "power": x0 = [ (np.amax(semivariance) - np.amin(semivariance)) / (np.amax(lags) - np.amin(lags)), 1.1, np.amin(semivariance), ] bnds = ([0.0, 0.001, 0.0], [np.inf, 1.999, np.amax(semivariance)]) else: x0 = [ np.amax(semivariance) - np.amin(semivariance), 0.25 * np.amax(lags), np.amin(semivariance), ] bnds = ( [0.0, 0.0, 0.0], [10.0 * np.amax(semivariance), np.amax(lags), np.amax(semivariance)], ) # use 'soft' L1-norm minimization in order to buffer against # potential outliers (weird/skewed points) res = least_squares( _variogram_residuals, x0, bounds=bnds, loss="soft_l1", args=(lags, semivariance, variogram_function, weight), ) return res.x def _krige( X, y, coords, variogram_function, variogram_model_parameters, coordinates_type, pseudo_inv=False, ): """Sets up and solves the ordinary kriging system for the given coordinate pair. This function is only used for the statistics calculations. Parameters ---------- X: ndarray float array [n_samples, n_dim], the input array of coordinates y: ndarray float array [n_samples], the input array of measurement values coords: ndarray float array [1, n_dim], point at which to evaluate the kriging system variogram_function: callable function that will be called to evaluate variogram model variogram_model_parameters: list user-specified parameters for variogram model coordinates_type: str type of coordinates in X array, can be 'euclidean' for standard rectangular coordinates or 'geographic' if the coordinates are lat/lon pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. Default: False Returns ------- zinterp: float kriging estimate at the specified point sigmasq: float mean square error of the kriging estimate """ zero_index = None zero_value = False # calculate distance between points... need a square distance matrix # of inter-measurement-point distances and a vector of distances between # measurement points (X) and the kriging point (coords) if coordinates_type == "euclidean": d = squareform(pdist(X, metric="euclidean")) bd = np.squeeze(cdist(X, coords[None, :], metric="euclidean")) # geographic coordinate distances still calculated in the old way... 
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat # also assume problem is 2D; check done earlier in initializing variogram elif coordinates_type == "geographic": x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True) y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True) d = great_circle_distance(x1, y1, x2, y2) bd = great_circle_distance( X[:, 0], X[:, 1], coords[0] * np.ones(X.shape[0]), coords[1] * np.ones(X.shape[0]), ) # this check is done when initializing variogram, but kept here anyways... else: raise ValueError( "Specified coordinate type '%s' is not supported." % coordinates_type ) # check if kriging point overlaps with measurement point if np.any(np.absolute(bd) <= 1e-10): zero_value = True zero_index = np.where(bd <= 1e-10)[0][0] # set up kriging matrix n = X.shape[0] a = np.zeros((n + 1, n + 1)) a[:n, :n] = -variogram_function(variogram_model_parameters, d) np.fill_diagonal(a, 0.0) a[n, :] = 1.0 a[:, n] = 1.0 a[n, n] = 0.0 # set up RHS b = np.zeros((n + 1, 1)) b[:n, 0] = -variogram_function(variogram_model_parameters, bd) if zero_value: b[zero_index, 0] = 0.0 b[n, 0] = 1.0 # solve if pseudo_inv: res = np.linalg.lstsq(a, b, rcond=None)[0] else: res = np.linalg.solve(a, b) zinterp = np.sum(res[:n, 0] * y) sigmasq = np.sum(res[:, 0] * -b[:, 0]) return zinterp, sigmasq def _find_statistics( X, y, variogram_function, variogram_model_parameters, coordinates_type, pseudo_inv=False, ): """Calculates variogram fit statistics. Returns the delta, sigma, and epsilon values for the variogram fit. These arrays are used for statistics calculations. Parameters ---------- X: ndarray float array [n_samples, n_dim], the input array of coordinates y: ndarray float array [n_samples], the input array of measurement values variogram_function: callable function that will be called to evaluate variogram model variogram_model_parameters: list user-specified parameters for variogram model coordinates_type: str type of coordinates in X array, can be 'euclidean' for standard rectangular coordinates or 'geographic' if the coordinates are lat/lon pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. Default: False Returns ------- delta: ndarray residuals between observed values and kriged estimates for those values sigma: ndarray mean error in kriging estimates epsilon: ndarray residuals normalized by their mean error """ delta = np.zeros(y.shape) sigma = np.zeros(y.shape) for i in range(y.shape[0]): # skip the first value in the kriging problem if i == 0: continue else: k, ss = _krige( X[:i, :], y[:i], X[i, :], variogram_function, variogram_model_parameters, coordinates_type, pseudo_inv, ) # if the estimation error is zero, it's probably because # the evaluation point X[i, :] is really close to one of the # kriging system points in X[:i, :]... # in the case of zero estimation error, the results are not stored if np.absolute(ss) < eps: continue delta[i] = y[i] - k sigma[i] = np.sqrt(ss) # only use non-zero entries in these arrays... 
sigma is used to pull out # non-zero entries in both cases because it is guaranteed to be positive, # whereas delta can be either positive or negative delta = delta[sigma > eps] sigma = sigma[sigma > eps] epsilon = delta / sigma return delta, sigma, epsilon def calcQ1(epsilon): """Returns the Q1 statistic for the variogram fit (see [1]).""" return abs(np.sum(epsilon) / (epsilon.shape[0] - 1)) def calcQ2(epsilon): """Returns the Q2 statistic for the variogram fit (see [1]).""" return np.sum(epsilon**2) / (epsilon.shape[0] - 1) def calc_cR(Q2, sigma): """Returns the cR statistic for the variogram fit (see [1]).""" return Q2 * np.exp(np.sum(np.log(sigma**2)) / sigma.shape[0])
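# ----------------------------------------------------------------------------
# Small self-check sketch (illustrative only; not part of the original
# module). It confirms on a few hypothetical point pairs that the
# chord-to-arc conversion in euclid3_to_great_circle agrees with the
# arctan-based great_circle_distance defined above. Relies on the
# module-level numpy import and runs only as a script.
if __name__ == "__main__":
    lon1 = np.array([0.0, 10.0, 45.0])
    lat1 = np.array([0.0, 0.0, 30.0])
    lon2 = np.array([90.0, 10.0, -45.0])
    lat2 = np.array([0.0, 45.0, -30.0])

    direct = great_circle_distance(lon1, lat1, lon2, lat2)

    def to_unit_xyz(lon, lat):
        # cartesian coordinates of the points on a unit sphere
        lon, lat = np.radians(lon), np.radians(lat)
        return np.stack(
            [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]
        )

    chord = np.linalg.norm(
        to_unit_xyz(lon1, lat1) - to_unit_xyz(lon2, lat2), axis=0
    )
    via_chord = euclid3_to_great_circle(chord)

    print(np.allclose(direct, via_chord))  # expected: True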
30,289
33.538198
88
py
PyKrige
PyKrige-main/src/pykrige/uk3d.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Contains class UniversalKriging3D. References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. Copyright (c) 2015-2020, PyKrige Developers """ import warnings import numpy as np import scipy.linalg from scipy.spatial.distance import cdist from . import core, variogram_models from .compat_gstools import validate_gstools from .core import ( P_INV, _adjust_for_anisotropy, _find_statistics, _initialize_variogram_model, _make_variogram_parameter_list, ) class UniversalKriging3D: """Three-dimensional universal kriging. Parameters ---------- x : array_like X-coordinates of data points. y : array_like Y-coordinates of data points. z : array_like Z-coordinates of data points. val : array_like Values at data points. variogram_model : str or GSTools CovModel, optional Specified which variogram model to use; may be one of the following: linear, power, gaussian, spherical, exponential, hole-effect. Default is linear variogram model. To utilize a custom variogram model, specify 'custom'; you must also provide variogram_parameters and variogram_function. Note that the hole-effect model is only technically correct for one-dimensional problems. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional Parameters that define the specified variogram model. If not provided, parameters will be automatically calculated using a "soft" L1 norm minimization scheme. For variogram model parameters provided in a dict, the required dict keys vary according to the specified variogram model: :: # linear {'slope': slope, 'nugget': nugget} # power {'scale': scale, 'exponent': exponent, 'nugget': nugget} # gaussian, spherical, exponential and hole-effect: {'sill': s, 'range': r, 'nugget': n} # OR {'psill': p, 'range': r, 'nugget': n} Note that either the full sill or the partial sill (psill = sill - nugget) can be specified in the dict. For variogram model parameters provided in a list, the entries must be as follows: :: # linear [slope, nugget] # power [scale, exponent, nugget] # gaussian, spherical, exponential and hole-effect: [sill, range, nugget] Note that the full sill (NOT the partial sill) must be specified in the list format. For a custom variogram model, the parameters are required, as custom variogram models will not automatically be fit to the data. Furthermore, the parameters must be specified in list format, in the order in which they are used in the callable function (see variogram_function for more information). The code does not check that the provided list contains the appropriate number of parameters for the custom variogram model, so an incorrect parameter list in such a case will probably trigger an esoteric exception someplace deep in the code. NOTE that, while the list format expects the full sill, the code itself works internally with the partial sill. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. The function must take only two arguments: first, a list of parameters for the variogram model; second, the distances at which to calculate the variogram model. 
The list provided in variogram_parameters will be passed to the function as the first argument. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : bool, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. The routine is currently hard-coded such that the weights are calculated from a logistic function, so weights at small lags are ~1 and weights at the longest lags are ~0; the center of the logistic weighting is hard-coded to be at 70% of the distance from the shortest lag to the largest lag. Setting this parameter to True indicates that weights will be applied. Default is False. (Kitanidis suggests that the values at smaller lags are more important in fitting a variogram model, so the option is provided to enable such weighting.) anisotropy_scaling_y : float, optional Scalar stretching value to take into account anisotropy in the y direction. Default is 1 (effectively no stretching). Scaling is applied in the y direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). anisotropy_scaling_z : float, optional Scalar stretching value to take into account anisotropy in the z direction. Default is 1 (effectively no stretching). Scaling is applied in the z direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). anisotropy_angle_x : float, optional CCW angle (in degrees) by which to rotate coordinate system about the x axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_y : float, optional CCW angle (in degrees) by which to rotate coordinate system about the y axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_z : float, optional CCW angle (in degrees) by which to rotate coordinate system about the z axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. drift_terms : list of strings, optional List of drift terms to include in three-dimensional universal kriging. Supported drift terms are currently 'regional_linear', 'specified', and 'functional'. specified_drift : list of array-like objects, optional List of arrays that contain the drift values at data points. The arrays must be shape (N,) or (N, 1), where N is the number of data points. Any number of specified-drift terms may be used. functional_drift : list of callable objects, optional List of callable functions that will be used to evaluate drift terms. The function must be a function of only the three spatial coordinates and must return a single value for each coordinate triplet. It must be set up to be called with only three arguments, first an array of x values, the second an array of y values, and the third an array of z values. If the problem involves anisotropy, the drift values are calculated in the adjusted data frame. verbose : boolean, optional Enables program text output to monitor kriging process. Default is False (off). 
enable_plotting : boolean, optional Enables plotting to display variogram. Default is False (off). exact_values : bool, optional If True, interpolation provides input values at input locations. If False, interpolation accounts for variance/nugget within input values at input locations and does not behave as an exact-interpolator [2]. Note that this only has an effect if there is variance/nugget present within the input data since it is interpreted as measurement error. If the nugget is zero, the kriged field will behave as an exact interpolator. pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. Default: False pseudo_inv_type : :class:`str`, optional Here you can select the algorithm to compute the pseudo-inverse matrix: * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values Default: `"pinv"` References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. """ UNBIAS = True # This can be changed to remove the unbiasedness condition # Really for testing purposes only... eps = 1.0e-10 # Cutoff for comparison to zero variogram_dict = { "linear": variogram_models.linear_variogram_model, "power": variogram_models.power_variogram_model, "gaussian": variogram_models.gaussian_variogram_model, "spherical": variogram_models.spherical_variogram_model, "exponential": variogram_models.exponential_variogram_model, "hole-effect": variogram_models.hole_effect_variogram_model, } def __init__( self, x, y, z, val, variogram_model="linear", variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0, drift_terms=None, specified_drift=None, functional_drift=None, verbose=False, enable_plotting=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", ): # config the pseudo inverse self.pseudo_inv = bool(pseudo_inv) self.pseudo_inv_type = str(pseudo_inv_type) if self.pseudo_inv_type not in P_INV: raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type)) # Deal with mutable default argument if drift_terms is None: drift_terms = [] if specified_drift is None: specified_drift = [] if functional_drift is None: functional_drift = [] # set up variogram model and parameters... 
self.variogram_model = variogram_model self.model = None if not isinstance(exact_values, bool): raise ValueError("exact_values has to be boolean True or False") self.exact_values = exact_values # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim < 3: raise ValueError("GSTools: model dim is not 3") self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling_y = self.model.pykrige_anis_y anisotropy_scaling_z = self.model.pykrige_anis_z anisotropy_angle_x = self.model.pykrige_angle_x anisotropy_angle_y = self.model.pykrige_angle_y anisotropy_angle_z = self.model.pykrige_angle_z if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." ) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] # Code assumes 1D input arrays. Ensures that any extraneous dimensions # don't get in the way. Copies are created to avoid any problems with # referencing the original passed arguments. self.X_ORIG = np.atleast_1d( np.squeeze(np.array(x, copy=True, dtype=np.float64)) ) self.Y_ORIG = np.atleast_1d( np.squeeze(np.array(y, copy=True, dtype=np.float64)) ) self.Z_ORIG = np.atleast_1d( np.squeeze(np.array(z, copy=True, dtype=np.float64)) ) self.VALUES = np.atleast_1d( np.squeeze(np.array(val, copy=True, dtype=np.float64)) ) self.verbose = verbose self.enable_plotting = enable_plotting if self.enable_plotting and self.verbose: print("Plotting Enabled\n") self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0 self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0 self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG)) / 2.0 self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z if self.verbose: print("Adjusting data for anisotropy...") self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z], ).T if self.verbose: print("Initializing variogram model...") vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", 
self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") if self.verbose: print("Initializing drift terms...") # Note that the regional linear drift values will be based on the # adjusted coordinate system. Really, it doesn't actually matter # which coordinate system is used here. if "regional_linear" in drift_terms: self.regional_linear_drift = True if self.verbose: print("Implementing regional linear drift.") else: self.regional_linear_drift = False if "specified" in drift_terms: if type(specified_drift) is not list: raise TypeError( "Arrays for specified drift terms must be " "encapsulated in a list." ) if len(specified_drift) == 0: raise ValueError( "Must provide at least one drift-value array " "when using the 'specified' drift capability." ) self.specified_drift = True self.specified_drift_data_arrays = [] for term in specified_drift: specified = np.squeeze(np.array(term, copy=True)) if specified.size != self.X_ORIG.size: raise ValueError( "Must specify the drift values for each " "data point when using the " "'specified' drift capability." ) self.specified_drift_data_arrays.append(specified) else: self.specified_drift = False # The provided callable functions will be evaluated using # the adjusted coordinates. if "functional" in drift_terms: if type(functional_drift) is not list: raise TypeError( "Callables for functional drift terms must " "be encapsulated in a list." ) if len(functional_drift) == 0: raise ValueError( "Must provide at least one callable object " "when using the 'functional' drift capability." ) self.functional_drift = True self.functional_drift_terms = functional_drift else: self.functional_drift = False def update_variogram_model( self, variogram_model, variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0, ): """Changes the variogram model and variogram parameters for the kriging system. Parameters ---------- variogram_model : str or GSTools CovModel May be any of the variogram models listed above. May also be 'custom', in which case variogram_parameters and variogram_function must be specified. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional List or dict of variogram model parameters, as explained above. If not provided, a best fit model will be calculated as described above. 
variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. See above for more information. nlags : int, optional) Number of averaging bins for the semivariogram. Default is 6. weight : boolean, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. See above for more information. True indicates that weights will be applied. Default is False. anisotropy_scaling_y : float, optional Scalar stretching value to take into account anisotropy in y-direction. Default is 1 (effectively no stretching). See above for more information. anisotropy_scaling_z : float, optional Scalar stretching value to take into account anisotropy in z-direction. Default is 1 (effectively no stretching). See above for more information. anisotropy_angle_x : float, optional Angle (in degrees) by which to rotate coordinate system about the x axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. anisotropy_angle_y : float, optional Angle (in degrees) by which to rotate coordinate system about the y axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. anisotropy_angle_z : float, optional Angle (in degrees) by which to rotate coordinate system about the z axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. """ # set up variogram model and parameters... self.variogram_model = variogram_model self.model = None # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim < 3: raise ValueError("GSTools: model dim is not 3") self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling_y = self.model.pykrige_anis_y anisotropy_scaling_z = self.model.pykrige_anis_z anisotropy_angle_x = self.model.pykrige_angle_x anisotropy_angle_y = self.model.pykrige_angle_y anisotropy_angle_z = self.model.pykrige_angle_z if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." 
) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] if ( anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or anisotropy_angle_z != self.anisotropy_angle_z ): if self.verbose: print("Adjusting data for anisotropy...") self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [ self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z, ], ).T if self.verbose: print("Updating variogram mode...") vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") def display_variogram_model(self): """Displays semivariogram and variogram model.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.lags, self.semivariance, "r*") ax.plot( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), "k-", ) plt.show() def switch_verbose(self): """Enables/disables program text output. No arguments.""" self.verbose = not self.verbose def switch_plotting(self): """Enables/disable variogram plot display. No arguments.""" self.enable_plotting = not self.enable_plotting def get_epsilon_residuals(self): """Returns the epsilon residuals for the variogram fit. 
No arguments.""" return self.epsilon def plot_epsilon_residuals(self): """Plots the epsilon residuals for the variogram fit. No arguments.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*") ax.axhline(y=0.0) plt.show() def get_statistics(self): """Returns the Q1, Q2, and cR statistics for the variogram fit (in that order). No arguments. """ return self.Q1, self.Q2, self.cR def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR) def _get_kriging_matrix(self, n, n_withdrifts): """Assembles the kriging matrix.""" xyz = np.concatenate( ( self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.Z_ADJUSTED[:, np.newaxis], ), axis=1, ) d = cdist(xyz, xyz, "euclidean") if self.UNBIAS: a = np.zeros((n_withdrifts + 1, n_withdrifts + 1)) else: a = np.zeros((n_withdrifts, n_withdrifts)) a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.0) i = n if self.regional_linear_drift: a[:n, i] = self.X_ADJUSTED a[i, :n] = self.X_ADJUSTED i += 1 a[:n, i] = self.Y_ADJUSTED a[i, :n] = self.Y_ADJUSTED i += 1 a[:n, i] = self.Z_ADJUSTED a[i, :n] = self.Z_ADJUSTED i += 1 if self.specified_drift: for arr in self.specified_drift_data_arrays: a[:n, i] = arr a[i, :n] = arr i += 1 if self.functional_drift: for func in self.functional_drift_terms: a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED) a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED) i += 1 if i != n_withdrifts: warnings.warn( "Error in creating kriging matrix. Kriging may fail.", RuntimeWarning ) if self.UNBIAS: a[n_withdrifts, :n] = 1.0 a[:n, n_withdrifts] = 1.0 a[n : n_withdrifts + 1, n : n_withdrifts + 1] = 0.0 return a def _exec_vector(self, a, bd, xyz, mask, n_withdrifts, spec_drift_grids): """Solves the kriging system as a vectorized operation. This method can take a lot of memory for large grids and/or large datasets.""" npt = bd.shape[0] n = self.X_ADJUSTED.shape[0] zero_index = None zero_value = False # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) if self.UNBIAS: b = np.zeros((npt, n_withdrifts + 1, 1)) else: b = np.zeros((npt, n_withdrifts, 1)) b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], zero_index[1], 0] = 0.0 i = n if self.regional_linear_drift: b[:, i, 0] = xyz[:, 2] i += 1 b[:, i, 0] = xyz[:, 1] i += 1 b[:, i, 0] = xyz[:, 0] i += 1 if self.specified_drift: for spec_vals in spec_drift_grids: b[:, i, 0] = spec_vals.flatten() i += 1 if self.functional_drift: for func in self.functional_drift_terms: b[:, i, 0] = func(xyz[:, 2], xyz[:, 1], xyz[:, 0]) i += 1 if i != n_withdrifts: warnings.warn( "Error in setting up kriging system. 
Kriging may fail.",
                RuntimeWarning,
            )
        if self.UNBIAS:
            b[:, n_withdrifts, 0] = 1.0

        if (~mask).any():
            mask_b = np.repeat(
                mask[:, np.newaxis, np.newaxis], n_withdrifts + 1, axis=1
            )
            b = np.ma.array(b, mask=mask_b)

        if self.UNBIAS:
            x = (
                np.dot(a_inv, b.reshape((npt, n_withdrifts + 1)).T)
                .reshape((1, n_withdrifts + 1, npt))
                .T
            )
        else:
            x = (
                np.dot(a_inv, b.reshape((npt, n_withdrifts)).T)
                .reshape((1, n_withdrifts, npt))
                .T
            )
        kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
        sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)

        return kvalues, sigmasq

    def _exec_loop(self, a, bd_all, xyz, mask, n_withdrifts, spec_drift_grids):
        """Solves the kriging system by looping over all specified points.
        Less memory-intensive, but involves a Python-level loop."""

        npt = bd_all.shape[0]
        n = self.X_ADJUSTED.shape[0]
        kvalues = np.zeros(npt)
        sigmasq = np.zeros(npt)

        # use the desired method to invert the kriging matrix
        if self.pseudo_inv:
            a_inv = P_INV[self.pseudo_inv_type](a)
        else:
            a_inv = scipy.linalg.inv(a)

        for j in np.nonzero(~mask)[
            0
        ]:  # Note that this is the same thing as range(npt) if mask is not defined,
            bd = bd_all[j]  # otherwise it takes the non-masked elements.
            if np.any(np.absolute(bd) <= self.eps):
                zero_value = True
                zero_index = np.where(np.absolute(bd) <= self.eps)
            else:
                zero_value = False
                zero_index = None

            if self.UNBIAS:
                b = np.zeros((n_withdrifts + 1, 1))
            else:
                b = np.zeros((n_withdrifts, 1))
            b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
            if zero_value and self.exact_values:
                b[zero_index[0], 0] = 0.0

            i = n
            if self.regional_linear_drift:
                b[i, 0] = xyz[j, 2]
                i += 1
                b[i, 0] = xyz[j, 1]
                i += 1
                b[i, 0] = xyz[j, 0]
                i += 1
            if self.specified_drift:
                for spec_vals in spec_drift_grids:
                    # Index by the current kriging point j (not the matrix row i),
                    # mirroring the vectorized backend above.
                    b[i, 0] = spec_vals.flatten()[j]
                    i += 1
            if self.functional_drift:
                for func in self.functional_drift_terms:
                    b[i, 0] = func(xyz[j, 2], xyz[j, 1], xyz[j, 0])
                    i += 1
            if i != n_withdrifts:
                warnings.warn(
                    "Error in setting up kriging system. Kriging may fail.",
                    RuntimeWarning,
                )
            if self.UNBIAS:
                b[n_withdrifts, 0] = 1.0

            x = np.dot(a_inv, b)
            kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
            sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])

        return kvalues, sigmasq

    def execute(
        self,
        style,
        xpoints,
        ypoints,
        zpoints,
        mask=None,
        backend="vectorized",
        specified_drift_arrays=None,
    ):
        """Calculates a kriged grid and the associated variance.

        This is now the method that performs the main kriging calculation.
        Note that currently measurements (i.e., the 'val' values) are
        considered 'exact'. This means that, when a specified coordinate for
        interpolation is exactly the same as one of the data points, the
        variogram evaluated at the point is forced to be zero. The diagonal
        of the kriging matrix is also always forced to be zero. In forcing
        the variogram evaluated at data points to be zero, we are effectively
        saying that there is no variance at that point (no uncertainty,
        so the value is 'exact').

        In the future, the code may include an extra 'exact_values' boolean
        flag that can be adjusted to specify whether to treat the measurements
        as 'exact'. Setting the flag to false would indicate that the
        variogram should not be forced to be zero at zero distance
        (i.e., when evaluated at data points). Instead, the uncertainty in
        the point will be equal to the nugget. This would mean that the
        diagonal of the kriging matrix would be set to the nugget instead
        of to zero.

        Parameters
        ----------
        style : str
            Specifies how to treat input kriging points.
Specifying 'grid' treats
            xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates
            that define a rectangular grid. Specifying 'points' treats
            xpoints, ypoints, and zpoints as arrays that provide coordinates
            at which to solve the kriging system. Specifying 'masked' treats
            xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates
            that define a rectangular grid and uses mask to only evaluate
            specific points in the grid.
        xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked', x-coordinates of
            LxMxN grid. If style is specified as 'points', x-coordinates of
            specific points at which to solve kriging system.
        ypoints : array_like, shape (M,) or (M, 1)
            If style is specified as 'grid' or 'masked', y-coordinates of
            LxMxN grid. If style is specified as 'points', y-coordinates of
            specific points at which to solve kriging system.
            Note that in this case, xpoints, ypoints, and zpoints must have
            the same dimensions (i.e., L = M = N).
        zpoints : array_like, shape (L,) or (L, 1)
            If style is specified as 'grid' or 'masked', z-coordinates of
            LxMxN grid. If style is specified as 'points', z-coordinates of
            specific points at which to solve kriging system.
            Note that in this case, xpoints, ypoints, and zpoints must have
            the same dimensions (i.e., L = M = N).
        mask : boolean array, shape (L, M, N), optional
            Specifies the points in the rectangular grid defined by xpoints,
            ypoints, zpoints that are to be excluded in the kriging
            calculations. Must be provided if style is specified as 'masked'.
            False indicates that the point should not be masked, so the
            kriging system will be solved at the point.
            True indicates that the point should be masked, so the kriging
            system will not be solved at the point.
        backend : string, optional
            Specifies which approach to use in kriging. Specifying
            'vectorized' will solve the entire kriging problem at once in a
            vectorized operation. This approach is faster but also can consume
            a significant amount of memory for large grids and/or large
            datasets. Specifying 'loop' will loop through each point at which
            the kriging system is to be solved. This approach is slower but
            also less memory-intensive. Default is 'vectorized'.
        specified_drift_arrays : list of array-like objects, optional
            Specifies the drift values at the points at which the kriging
            system is to be evaluated. Required if 'specified' drift provided
            in the list of drift terms when instantiating the
            UniversalKriging3D class. Must be a list of arrays in the same
            order as the list provided when instantiating the kriging object.
            Array(s) must be the same dimension as the specified grid or have
            the same number of points as the specified points; i.e., the
            arrays either must be shape (L, M, N), where L is the number of
            z grid-points, M is the number of y grid-points, and N is the
            number of x grid-points, or shape (N,) or (N, 1), where N is the
            number of points at which to evaluate the kriging system.

        Returns
        -------
        kvalues : ndarray, shape (L, M, N) or (N,) or (N, 1)
            Interpolated values of specified grid or at the specified set
            of points. If style was specified as 'masked', kvalues will be a
            numpy masked array.
        sigmasq : ndarray, shape (L, M, N) or (N,) or (N, 1)
            Variance at specified grid points or at the specified set of
            points. If style was specified as 'masked', sigmasq will be a
            numpy masked array.
        """
        if self.verbose:
            print("Executing Universal Kriging...\n")

        if style != "grid" and style != "masked" and style != "points":
            raise ValueError("style argument must be 'grid', 'points', or 'masked'")

        xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
        ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
        zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
        n = self.X_ADJUSTED.shape[0]
        n_withdrifts = n
        if self.regional_linear_drift:
            n_withdrifts += 3
        if self.specified_drift:
            n_withdrifts += len(self.specified_drift_data_arrays)
        if self.functional_drift:
            n_withdrifts += len(self.functional_drift_terms)
        nx = xpts.size
        ny = ypts.size
        nz = zpts.size
        a = self._get_kriging_matrix(n, n_withdrifts)

        if style in ["grid", "masked"]:
            if style == "masked":
                if mask is None:
                    raise IOError(
                        "Must specify boolean masking array when style is 'masked'."
                    )
                if mask.ndim != 3:
                    raise ValueError("Mask is not three-dimensional.")
                if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
                    if (
                        mask.shape[0] == nx
                        and mask.shape[2] == nz
                        and mask.shape[1] == ny
                    ):
                        mask = mask.swapaxes(0, 2)
                    else:
                        raise ValueError(
                            "Mask dimensions do not match specified grid dimensions."
                        )
                mask = mask.flatten()
            npt = nz * ny * nx
            grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing="ij")
            xpts = grid_x.flatten()
            ypts = grid_y.flatten()
            zpts = grid_z.flatten()

        elif style == "points":
            if xpts.size != ypts.size or ypts.size != zpts.size:
                raise ValueError(
                    "xpoints, ypoints, and zpoints must have same "
                    "dimensions when treated as listing "
                    "discrete points."
                )
            npt = nx
        else:
            raise ValueError("style argument must be 'grid', 'points', or 'masked'")

        if specified_drift_arrays is None:
            specified_drift_arrays = []
        spec_drift_grids = []
        if self.specified_drift:
            if len(specified_drift_arrays) == 0:
                raise ValueError(
                    "Must provide drift values for kriging "
                    "points when using 'specified' drift "
                    "capability."
                )
            if type(specified_drift_arrays) is not list:
                raise TypeError(
                    "Arrays for specified drift terms must "
                    "be encapsulated in a list."
                )
            for spec in specified_drift_arrays:
                if style in ["grid", "masked"]:
                    if spec.ndim < 3:
                        raise ValueError(
                            "Dimensions of drift values array do "
                            "not match specified grid dimensions."
                        )
                    elif (
                        spec.shape[0] != nz
                        or spec.shape[1] != ny
                        or spec.shape[2] != nx
                    ):
                        if (
                            spec.shape[0] == nx
                            and spec.shape[2] == nz
                            and spec.shape[1] == ny
                        ):
                            spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2)))
                        else:
                            raise ValueError(
                                "Dimensions of drift values array "
                                "do not match specified grid "
                                "dimensions."
                            )
                    else:
                        spec_drift_grids.append(np.squeeze(spec))
                elif style == "points":
                    if spec.ndim != 1:
                        raise ValueError(
                            "Dimensions of drift values array do "
                            "not match specified grid dimensions."
                        )
                    elif spec.shape[0] != xpts.size:
                        raise ValueError(
                            "Number of supplied drift values in "
                            "array do not match specified number "
                            "of kriging points."
                        )
                    else:
                        spec_drift_grids.append(np.squeeze(spec))
            if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
                raise ValueError(
                    "Inconsistent number of specified drift terms supplied."
                )
        else:
            if len(specified_drift_arrays) != 0:
                warnings.warn(
                    "Provided specified drift values, but "
                    "'specified' drift was not initialized during "
                    "instantiation of UniversalKriging3D class.",
                    RuntimeWarning,
                )

        xpts, ypts, zpts = _adjust_for_anisotropy(
            np.vstack((xpts, ypts, zpts)).T,
            [self.XCENTER, self.YCENTER, self.ZCENTER],
            [self.anisotropy_scaling_y, self.anisotropy_scaling_z],
            [self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
        ).T

        if style != "masked":
            mask = np.zeros(npt, dtype="bool")

        xyz_points = np.concatenate(
            (zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1
        )
        xyz_data = np.concatenate(
            (
                self.Z_ADJUSTED[:, np.newaxis],
                self.Y_ADJUSTED[:, np.newaxis],
                self.X_ADJUSTED[:, np.newaxis],
            ),
            axis=1,
        )
        bd = cdist(xyz_points, xyz_data, "euclidean")

        if backend == "vectorized":
            kvalues, sigmasq = self._exec_vector(
                a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids
            )
        elif backend == "loop":
            kvalues, sigmasq = self._exec_loop(
                a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids
            )
        else:
            raise ValueError(
                "Specified backend {} is not supported for "
                "3D universal kriging.".format(backend)
            )

        if style == "masked":
            kvalues = np.ma.array(kvalues, mask=mask)
            sigmasq = np.ma.array(sigmasq, mask=mask)

        if style in ["masked", "grid"]:
            kvalues = kvalues.reshape((nz, ny, nx))
            sigmasq = sigmasq.reshape((nz, ny, nx))

        return kvalues, sigmasq
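
# ---------------------------------------------------------------------------
# Editor's note: the block below is a hedged usage sketch, not part of the
# original PyKrige module. It only exercises the UniversalKriging3D API
# defined above; the coordinates, values, variogram choice, and grid are
# synthetic and purely illustrative (numpy is already imported as np at the
# top of this module).
if __name__ == "__main__":
    # Build a small synthetic data set: 50 scattered points whose values
    # follow a linear trend in z plus a little noise.
    rng = np.random.default_rng(0)
    x = rng.uniform(0.0, 10.0, 50)
    y = rng.uniform(0.0, 10.0, 50)
    z = rng.uniform(0.0, 10.0, 50)
    val = 0.5 * z + rng.normal(scale=0.1, size=50)

    # Universal kriging with a regional linear drift term and the default
    # linear variogram model (parameters fitted automatically).
    uk3d = UniversalKriging3D(
        x, y, z, val,
        variogram_model="linear",
        drift_terms=["regional_linear"],
    )

    # Interpolate onto a 5 x 5 x 5 grid; for the 'grid' style, kvalues and
    # sigmasq come back with shape (nz, ny, nx).
    gridx = np.linspace(0.0, 10.0, 5)
    gridy = np.linspace(0.0, 10.0, 5)
    gridz = np.linspace(0.0, 10.0, 5)
    kvalues, sigmasq = uk3d.execute("grid", gridx, gridy, gridz)
    print(kvalues.shape, float(sigmasq.min()), float(sigmasq.max()))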
49,151
41.852659
88
py
PyKrige
PyKrige-main/src/pykrige/ok3d.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Contains class OrdinaryKriging3D. References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. Copyright (c) 2015-2020, PyKrige Developers """ import warnings import numpy as np import scipy.linalg from scipy.spatial.distance import cdist from . import core, variogram_models from .compat_gstools import validate_gstools from .core import ( P_INV, _adjust_for_anisotropy, _find_statistics, _initialize_variogram_model, _make_variogram_parameter_list, ) class OrdinaryKriging3D: """Three-dimensional ordinary kriging. Parameters ---------- x : array_like X-coordinates of data points. y : array_like Y-coordinates of data points. z : array_like Z-coordinates of data points. val : array_like Values at data points. variogram_model : str or GSTools CovModel, optional Specified which variogram model to use; may be one of the following: linear, power, gaussian, spherical, exponential, hole-effect. Default is linear variogram model. To utilize a custom variogram model, specify 'custom'; you must also provide variogram_parameters and variogram_function. Note that the hole-effect model is only technically correct for one-dimensional problems. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional Parameters that define the specified variogram model. If not provided, parameters will be automatically calculated using a "soft" L1 norm minimization scheme. For variogram model parameters provided in a dict, the required dict keys vary according to the specified variogram model: :: # linear {'slope': slope, 'nugget': nugget} # power {'scale': scale, 'exponent': exponent, 'nugget': nugget} # gaussian, spherical, exponential and hole-effect: {'sill': s, 'range': r, 'nugget': n} # OR {'psill': p, 'range': r, 'nugget': n} Note that either the full sill or the partial sill (psill = sill - nugget) can be specified in the dict. For variogram model parameters provided in a list, the entries must be as follows: :: # linear [slope, nugget] # power [scale, exponent, nugget] # gaussian, spherical, exponential and hole-effect: [sill, range, nugget] Note that the full sill (NOT the partial sill) must be specified in the list format. For a custom variogram model, the parameters are required, as custom variogram models will not automatically be fit to the data. Furthermore, the parameters must be specified in list format, in the order in which they are used in the callable function (see variogram_function for more information). The code does not check that the provided list contains the appropriate number of parameters for the custom variogram model, so an incorrect parameter list in such a case will probably trigger an esoteric exception someplace deep in the code. NOTE that, while the list format expects the full sill, the code itself works internally with the partial sill. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. The function must take only two arguments: first, a list of parameters for the variogram model; second, the distances at which to calculate the variogram model. 
The list provided in variogram_parameters will be passed to the function as the first argument. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : boolean, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. The routine is currently hard-coded such that the weights are calculated from a logistic function, so weights at small lags are ~1 and weights at the longest lags are ~0; the center of the logistic weighting is hard-coded to be at 70% of the distance from the shortest lag to the largest lag. Setting this parameter to True indicates that weights will be applied. Default is False. (Kitanidis suggests that the values at smaller lags are more important in fitting a variogram model, so the option is provided to enable such weighting.) anisotropy_scaling_y : float, optional Scalar stretching value to take into account anisotropy in the y direction. Default is 1 (effectively no stretching). Scaling is applied in the y direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). anisotropy_scaling_z : float, optional Scalar stretching value to take into account anisotropy in the z direction. Default is 1 (effectively no stretching). Scaling is applied in the z direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). anisotropy_angle_x : float, optional CCW angle (in degrees) by which to rotate coordinate system about the x axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_y : float, optional CCW angle (in degrees) by which to rotate coordinate system about the y axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_z : float, optional CCW angle (in degrees) by which to rotate coordinate system about the z axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. verbose : bool, optional Enables program text output to monitor kriging process. Default is False (off). enable_plotting : bool, optional Enables plotting to display variogram. Default is False (off). exact_values : bool, optional If True, interpolation provides input values at input locations. If False, interpolation accounts for variance/nugget within input values at input locations and does not behave as an exact-interpolator [2]. Note that this only has an effect if there is variance/nugget present within the input data since it is interpreted as measurement error. If the nugget is zero, the kriged field will behave as an exact interpolator. pseudo_inv : :class:`bool`, optional Whether the kriging system is solved with the pseudo inverted kriging matrix. If `True`, this leads to more numerical stability and redundant points are averaged. But it can take more time. 
Default: False pseudo_inv_type : :class:`str`, optional Here you can select the algorithm to compute the pseudo-inverse matrix: * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values Default: `"pinv"` References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. .. [2] N. Cressie, Statistics for spatial data, (Wiley Series in Probability and Statistics, 1993) 137 p. """ eps = 1.0e-10 # Cutoff for comparison to zero variogram_dict = { "linear": variogram_models.linear_variogram_model, "power": variogram_models.power_variogram_model, "gaussian": variogram_models.gaussian_variogram_model, "spherical": variogram_models.spherical_variogram_model, "exponential": variogram_models.exponential_variogram_model, "hole-effect": variogram_models.hole_effect_variogram_model, } def __init__( self, x, y, z, val, variogram_model="linear", variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0, verbose=False, enable_plotting=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", ): # config the pseudo inverse self.pseudo_inv = bool(pseudo_inv) self.pseudo_inv_type = str(pseudo_inv_type) if self.pseudo_inv_type not in P_INV: raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type)) # set up variogram model and parameters... self.variogram_model = variogram_model self.model = None if not isinstance(exact_values, bool): raise ValueError("exact_values has to be boolean True or False") self.exact_values = exact_values # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim < 3: raise ValueError("GSTools: model dim is not 3") self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling_y = self.model.pykrige_anis_y anisotropy_scaling_z = self.model.pykrige_anis_z anisotropy_angle_x = self.model.pykrige_angle_x anisotropy_angle_y = self.model.pykrige_angle_y anisotropy_angle_z = self.model.pykrige_angle_z if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." ) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] # Code assumes 1D input arrays. Ensures that any extraneous dimensions # don't get in the way. Copies are created to avoid any problems with # referencing the original passed arguments. 
self.X_ORIG = np.atleast_1d( np.squeeze(np.array(x, copy=True, dtype=np.float64)) ) self.Y_ORIG = np.atleast_1d( np.squeeze(np.array(y, copy=True, dtype=np.float64)) ) self.Z_ORIG = np.atleast_1d( np.squeeze(np.array(z, copy=True, dtype=np.float64)) ) self.VALUES = np.atleast_1d( np.squeeze(np.array(val, copy=True, dtype=np.float64)) ) self.verbose = verbose self.enable_plotting = enable_plotting if self.enable_plotting and self.verbose: print("Plotting Enabled\n") self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0 self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0 self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG)) / 2.0 self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z if self.verbose: print("Adjusting data for anisotropy...") self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z], ).T if self.verbose: print("Initializing variogram model...") vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") def update_variogram_model( self, variogram_model, variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0, ): """Changes the variogram model and variogram parameters for the kriging system. 
Parameters ---------- variogram_model : str or GSTools CovModel May be any of the variogram models listed above. May also be 'custom', in which case variogram_parameters and variogram_function must be specified. You can also use a `GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel. variogram_parameters : list or dict, optional List or dict of variogram model parameters, as explained above. If not provided, a best fit model will be calculated as described above. variogram_function : callable, optional A callable function that must be provided if variogram_model is specified as 'custom'. See above for more information. nlags : int, optional Number of averaging bins for the semivariogram. Default is 6. weight : bool, optional Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. See above for more information. True indicates that weights will be applied. Default is False. anisotropy_scaling_y : float, optional Scalar stretching value to take into account anisotropy in y-direction. Default is 1 (effectively no stretching). See above for more information. anisotropy_scaling_z : float, optional Scalar stretching value to take into account anisotropy in z-direction. Default is 1 (effectively no stretching). See above for more information. anisotropy_angle_x : float, optional Angle (in degrees) by which to rotate coordinate system about the x axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. anisotropy_angle_y : float, optional Angle (in degrees) by which to rotate coordinate system about the y axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. anisotropy_angle_z : float, optional Angle (in degrees) by which to rotate coordinate system about the z axis in order to take into account anisotropy. Default is 0 (no rotation). See above for more information. """ # set up variogram model and parameters... self.variogram_model = variogram_model self.model = None # check if a GSTools covariance model is given if hasattr(self.variogram_model, "pykrige_kwargs"): # save the model in the class self.model = self.variogram_model validate_gstools(self.model) if self.model.field_dim < 3: raise ValueError("GSTools: model dim is not 3") self.variogram_model = "custom" variogram_function = self.model.pykrige_vario variogram_parameters = [] anisotropy_scaling_y = self.model.pykrige_anis_y anisotropy_scaling_z = self.model.pykrige_anis_z anisotropy_angle_x = self.model.pykrige_angle_x anisotropy_angle_y = self.model.pykrige_angle_y anisotropy_angle_z = self.model.pykrige_angle_z if ( self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != "custom" ): raise ValueError( "Specified variogram model '%s' is not supported." % variogram_model ) elif self.variogram_model == "custom": if variogram_function is None or not callable(variogram_function): raise ValueError( "Must specify callable function for custom variogram model." 
) else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] if ( anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or anisotropy_angle_z != self.anisotropy_angle_z ): if self.verbose: print("Adjusting data for anisotropy...") self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy( np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [ self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z, ], ).T if self.verbose: print("Updating variogram mode...") vp_temp = _make_variogram_parameter_list( self.variogram_model, variogram_parameters ) ( self.lags, self.semivariance, self.variogram_model_parameters, ) = _initialize_variogram_model( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_model, vp_temp, self.variogram_function, nlags, weight, "euclidean", ) if self.verbose: if self.variogram_model == "linear": print("Using '%s' Variogram Model" % "linear") print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], "\n") elif self.variogram_model == "power": print("Using '%s' Variogram Model" % "power") print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") elif self.variogram_model == "custom": print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Partial Sill:", self.variogram_model_parameters[0]) print( "Full Sill:", self.variogram_model_parameters[0] + self.variogram_model_parameters[2], ) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], "\n") if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = _find_statistics( np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T, self.VALUES, self.variogram_function, self.variogram_model_parameters, "euclidean", self.pseudo_inv, ) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, "\n") def display_variogram_model(self): """Displays variogram model with the actual binned data.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.lags, self.semivariance, "r*") ax.plot( self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), "k-", ) plt.show() def switch_verbose(self): """Allows user to switch code talk-back on/off. Takes no arguments.""" self.verbose = not self.verbose def switch_plotting(self): """Allows user to switch plot display on/off. 
Takes no arguments.""" self.enable_plotting = not self.enable_plotting def get_epsilon_residuals(self): """Returns the epsilon residuals for the variogram fit.""" return self.epsilon def plot_epsilon_residuals(self): """Plots the epsilon residuals for the variogram fit.""" import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*") ax.axhline(y=0.0) plt.show() def get_statistics(self): """Returns the Q1, Q2, and cR statistics for the variogram fit (in that order). No arguments. """ return self.Q1, self.Q2, self.cR def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR) def _get_kriging_matrix(self, n): """Assembles the kriging matrix.""" xyz = np.concatenate( ( self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.Z_ADJUSTED[:, np.newaxis], ), axis=1, ) d = cdist(xyz, xyz, "euclidean") a = np.zeros((n + 1, n + 1)) a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.0) a[n, :] = 1.0 a[:, n] = 1.0 a[n, n] = 0.0 return a def _exec_vector(self, a, bd, mask): """Solves the kriging system as a vectorized operation. This method can take a lot of memory for large grids and/or large datasets.""" npt = bd.shape[0] n = self.X_ADJUSTED.shape[0] zero_index = None zero_value = False # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) b = np.zeros((npt, n + 1, 1)) b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], zero_index[1], 0] = 0.0 b[:, n, 0] = 1.0 if (~mask).any(): mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n + 1, axis=1) b = np.ma.array(b, mask=mask_b) x = np.dot(a_inv, b.reshape((npt, n + 1)).T).reshape((1, n + 1, npt)).T kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1) sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1) return kvalues, sigmasq def _exec_loop(self, a, bd_all, mask): """Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.""" npt = bd_all.shape[0] n = self.X_ADJUSTED.shape[0] kvalues = np.zeros(npt) sigmasq = np.zeros(npt) # use the desired method to invert the kriging matrix if self.pseudo_inv: a_inv = P_INV[self.pseudo_inv_type](a) else: a_inv = scipy.linalg.inv(a) for j in np.nonzero(~mask)[ 0 ]: # Note that this is the same thing as range(npt) if mask is not defined, bd = bd_all[j] # otherwise it takes the non-masked elements. if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_value = False zero_index = None b = np.zeros((n + 1, 1)) b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = np.dot(a_inv, b) kvalues[j] = np.sum(x[:n, 0] * self.VALUES) sigmasq[j] = np.sum(x[:, 0] * -b[:, 0]) return kvalues, sigmasq def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx): """Solves the kriging system by looping over all specified points. Uses only a certain number of closest points. 
Not very memory intensive, but the loop is done in pure Python. """ import scipy.linalg.lapack npt = bd_all.shape[0] n = bd_idx.shape[1] kvalues = np.zeros(npt) sigmasq = np.zeros(npt) for i in np.nonzero(~mask)[0]: b_selector = bd_idx[i] bd = bd_all[i] a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1]))) a = a_all[a_selector[:, None], a_selector] if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_value = False zero_index = None b = np.zeros((n + 1, 1)) b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd) if zero_value and self.exact_values: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = scipy.linalg.solve(a, b) kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector]) sigmasq[i] = -x[:, 0].dot(b[:, 0]) return kvalues, sigmasq def execute( self, style, xpoints, ypoints, zpoints, mask=None, backend="vectorized", n_closest_points=None, ): """Calculates a kriged grid and the associated variance. This is now the method that performs the main kriging calculation. Note that currently measurements (i.e., z values) are considered 'exact'. This means that, when a specified coordinate for interpolation is exactly the same as one of the data points, the variogram evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is also always forced to be zero. In forcing the variogram evaluated at data points to be zero, we are effectively saying that there is no variance at that point (no uncertainty, so the value is 'exact'). In the future, the code may include an extra 'exact_values' boolean flag that can be adjusted to specify whether to treat the measurements as 'exact'. Setting the flag to false would indicate that the variogram should not be forced to be zero at zero distance (i.e., when evaluated at data points). Instead, the uncertainty in the point will be equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to the nugget instead of to zero. Parameters ---------- style : str Specifies how to treat input kriging points. Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid. Specifying 'points' treats xpoints, ypoints, and zpoints as arrays that provide coordinates at which to solve the kriging system. Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints : array_like, shape (N,) or (N, 1) If style is specific as 'grid' or 'masked', x-coordinates of LxMxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. ypoints : array-like, shape (M,) or (M, 1) If style is specified as 'grid' or 'masked', y-coordinates of LxMxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). zpoints : array-like, shape (L,) or (L, 1) If style is specified as 'grid' or 'masked', z-coordinates of LxMxN grid. If style is specified as 'points', z-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). 
        mask : boolean array, shape (L, M, N), optional
            Specifies the points in the rectangular grid defined by xpoints,
            ypoints, zpoints that are to be excluded in the kriging
            calculations. Must be provided if style is specified as 'masked'.
            False indicates that the point should not be masked, so the
            kriging system will be solved at the point.
            True indicates that the point should be masked, so the kriging
            system will not be solved at the point.
        backend : str, optional
            Specifies which approach to use in kriging. Specifying 'vectorized'
            will solve the entire kriging problem at once in a
            vectorized operation. This approach is faster but can also consume a
            significant amount of memory for large grids and/or large datasets.
            Specifying 'loop' will loop through each point at which the kriging
            system is to be solved. This approach is slower but also less
            memory-intensive. Default is 'vectorized'.
        n_closest_points : int, optional
            For kriging with a moving window, specifies the number of nearby
            points to use in the calculation. This can speed up the calculation
            for large datasets, but should be used with caution.
            As Kitanidis notes, kriging with a moving window can produce
            unexpected oddities if the variogram model is not carefully chosen.

        Returns
        -------
        kvalues : ndarray, shape (L, M, N) or (N, 1)
            Interpolated values of specified grid or at the specified set
            of points. If style was specified as 'masked', kvalues will be a
            numpy masked array.
        sigmasq : ndarray, shape (L, M, N) or (N, 1)
            Variance at specified grid points or at the specified set of points.
            If style was specified as 'masked', sigmasq will be a numpy
            masked array.
        """

        if self.verbose:
            print("Executing Ordinary Kriging...\n")

        if style != "grid" and style != "masked" and style != "points":
            raise ValueError("style argument must be 'grid', 'points', or 'masked'")

        xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
        ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
        zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
        n = self.X_ADJUSTED.shape[0]
        nx = xpts.size
        ny = ypts.size
        nz = zpts.size
        a = self._get_kriging_matrix(n)

        if style in ["grid", "masked"]:
            if style == "masked":
                if mask is None:
                    raise IOError(
                        "Must specify boolean masking array when style is 'masked'."
                    )
                if mask.ndim != 3:
                    raise ValueError("Mask is not three-dimensional.")
                if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
                    if (
                        mask.shape[0] == nx
                        and mask.shape[2] == nz
                        and mask.shape[1] == ny
                    ):
                        mask = mask.swapaxes(0, 2)
                    else:
                        raise ValueError(
                            "Mask dimensions do not match specified grid dimensions."
                        )
                mask = mask.flatten()
            npt = nz * ny * nx
            grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing="ij")
            xpts = grid_x.flatten()
            ypts = grid_y.flatten()
            zpts = grid_z.flatten()
        elif style == "points":
            # all three coordinate arrays must match when listing discrete points
            if xpts.size != ypts.size or ypts.size != zpts.size:
                raise ValueError(
                    "xpoints, ypoints, and zpoints must have "
                    "same dimensions when treated as listing "
                    "discrete points."
) npt = nx else: raise ValueError("style argument must be 'grid', 'points', or 'masked'") xpts, ypts, zpts = _adjust_for_anisotropy( np.vstack((xpts, ypts, zpts)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z], ).T if style != "masked": mask = np.zeros(npt, dtype="bool") xyz_points = np.concatenate( (zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1 ) xyz_data = np.concatenate( ( self.Z_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.X_ADJUSTED[:, np.newaxis], ), axis=1, ) bd = cdist(xyz_points, xyz_data, "euclidean") if n_closest_points is not None: from scipy.spatial import cKDTree tree = cKDTree(xyz_data) bd, bd_idx = tree.query(xyz_points, k=n_closest_points, eps=0.0) if backend == "loop": kvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx) else: raise ValueError( "Specified backend '{}' not supported " "for moving window.".format(backend) ) else: if backend == "vectorized": kvalues, sigmasq = self._exec_vector(a, bd, mask) elif backend == "loop": kvalues, sigmasq = self._exec_loop(a, bd, mask) else: raise ValueError( "Specified backend {} is not supported for " "3D ordinary kriging.".format(backend) ) if style == "masked": kvalues = np.ma.array(kvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ["masked", "grid"]: kvalues = kvalues.reshape((nz, ny, nx)) sigmasq = sigmasq.reshape((nz, ny, nx)) return kvalues, sigmasq
39,816
41.676313
88
py
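A minimal usage sketch of the execute() API documented above, covering the 'grid' style and the moving-window 'points' style. The data arrays, grid sizes, and the 'linear' variogram choice are placeholders, and the constructor signature (positional coordinates and values plus a variogram_model keyword) is assumed from the upstream PyKrige API rather than shown in this file excerpt.

import numpy as np
from pykrige.ok3d import OrdinaryKriging3D

# Synthetic scattered observations (placeholder data).
rng = np.random.default_rng(0)
x, y, z = rng.uniform(0.0, 10.0, size=(3, 50))
vals = np.sin(x) + 0.1 * y - 0.05 * z

# Constructor arguments assumed from the upstream PyKrige API.
ok3d = OrdinaryKriging3D(x, y, z, vals, variogram_model="linear")

# 'grid' style: interpolate onto a regular 10 x 10 x 10 grid.
gridx = gridy = gridz = np.linspace(0.0, 10.0, 10)
k3d, ss3d = ok3d.execute("grid", gridx, gridy, gridz)  # both shaped (10, 10, 10)

# 'points' style with a moving window of the 10 nearest observations;
# per execute() above, n_closest_points requires the 'loop' backend.
pk, pss = ok3d.execute(
    "points", x[:5], y[:5], z[:5], backend="loop", n_closest_points=10
)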
PyKrige
PyKrige-main/src/pykrige/rk.py
"""Regression Kriging.""" from pykrige.compat import Krige, check_sklearn_model, validate_sklearn validate_sklearn() from sklearn.metrics import r2_score from sklearn.svm import SVR class RegressionKriging: """ An implementation of Regression-Kriging. As described here: https://en.wikipedia.org/wiki/Regression-Kriging Parameters ---------- regression_model: machine learning model instance from sklearn method: str, optional type of kriging to be performed variogram_model: str, optional variogram model to be used during Kriging n_closest_points: int number of closest points to be used during Ordinary Kriging nlags: int see OK/UK class description weight: bool see OK/UK class description verbose: bool see OK/UK class description exact_values : bool see OK/UK class description variogram_parameters : list or dict see OK/UK class description variogram_function : callable see OK/UK class description anisotropy_scaling : tuple single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D) anisotropy_angle : tuple single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D) enable_statistics : bool see OK class description coordinates_type : str see OK/UK class description drift_terms : list of strings see UK/UK3D class description point_drift : array_like see UK class description ext_drift_grid : tuple Holding the three values external_drift, external_drift_x and external_drift_z for the UK class functional_drift : list of callable see UK/UK3D class description """ def __init__( self, regression_model=SVR(), method="ordinary", variogram_model="linear", n_closest_points=10, nlags=6, weight=False, verbose=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", variogram_parameters=None, variogram_function=None, anisotropy_scaling=(1.0, 1.0), anisotropy_angle=(0.0, 0.0, 0.0), enable_statistics=False, coordinates_type="euclidean", drift_terms=None, point_drift=None, ext_drift_grid=(None, None, None), functional_drift=None, ): check_sklearn_model(regression_model) self.regression_model = regression_model self.n_closest_points = n_closest_points self.krige = Krige( method=method, variogram_model=variogram_model, nlags=nlags, weight=weight, n_closest_points=n_closest_points, verbose=verbose, exact_values=exact_values, pseudo_inv=pseudo_inv, pseudo_inv_type=pseudo_inv_type, variogram_parameters=variogram_parameters, variogram_function=variogram_function, anisotropy_scaling=anisotropy_scaling, anisotropy_angle=anisotropy_angle, enable_statistics=enable_statistics, coordinates_type=coordinates_type, drift_terms=drift_terms, point_drift=point_drift, ext_drift_grid=ext_drift_grid, functional_drift=functional_drift, ) def fit(self, p, x, y): """ Fit the regression method and also Krige the residual. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example 2d regression kriging. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, ) """ self.regression_model.fit(p, y) ml_pred = self.regression_model.predict(p) print("Finished learning regression model") # residual=y-ml_pred self.krige.fit(x=x, y=y - ml_pred) print("Finished kriging residuals") def predict(self, p, x, **kwargs): """ Predict. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. 
Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- pred: ndarray The expected value of ys for the query inputs, of shape (Ns,). """ return self.krige_residual(x, **kwargs) + self.regression_model.predict(p) def krige_residual(self, x, **kwargs): """ Calculate the residuals. Parameters ---------- x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. Returns ------- residual: ndarray kriged residual values """ return self.krige.predict(x, **kwargs) def score(self, p, x, y, sample_weight=None, **kwargs): """ Overloading default regression score method. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, ) """ return r2_score( y_pred=self.predict(p, x, **kwargs), y_true=y, sample_weight=sample_weight )
5,982
30.994652
86
py
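A minimal sketch of how the RegressionKriging class above might be driven end to end. Everything past the imports is an illustrative assumption: the synthetic predictors p, the 2D coordinates x, the targets y, the LinearRegression base model, and the split sizes are placeholders rather than anything prescribed by the class.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from pykrige.rk import RegressionKriging

# Synthetic data: 200 samples, 3 predictor variables, (lon, lat) coordinates.
rng = np.random.default_rng(42)
p = rng.normal(size=(200, 3))              # predictors for the regression model
x = rng.uniform(0.0, 1.0, size=(200, 2))   # coordinates handed to the kriging step
y = p @ np.array([1.0, -2.0, 0.5]) + np.sin(6.0 * x[:, 0]) + 0.1 * rng.normal(size=200)

p_train, p_test, x_train, x_test, y_train, y_test = train_test_split(
    p, x, y, test_size=0.3, random_state=42
)

m_rk = RegressionKriging(
    regression_model=LinearRegression(), method="ordinary", n_closest_points=10
)
m_rk.fit(p_train, x_train, y_train)        # fits the regressor, then kriges the residuals
print("held-out R2:", m_rk.score(p_test, x_test, y_test))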
PyKrige
PyKrige-main/src/pykrige/variogram_models.py
""" PyKrige ======= Code by Benjamin S. Murphy and the PyKrige Developers bscott.murphy@gmail.com Summary ------- Function definitions for variogram models. In each function, m is a list of defining parameters and d is an array of the distance values at which to calculate the variogram model. References ---------- .. [1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. Copyright (c) 2015-2020, PyKrige Developers """ import numpy as np def linear_variogram_model(m, d): """Linear model, m is [slope, nugget]""" slope = float(m[0]) nugget = float(m[1]) return slope * d + nugget def power_variogram_model(m, d): """Power model, m is [scale, exponent, nugget]""" scale = float(m[0]) exponent = float(m[1]) nugget = float(m[2]) return scale * d**exponent + nugget def gaussian_variogram_model(m, d): """Gaussian model, m is [psill, range, nugget]""" psill = float(m[0]) range_ = float(m[1]) nugget = float(m[2]) return psill * (1.0 - np.exp(-(d**2.0) / (range_ * 4.0 / 7.0) ** 2.0)) + nugget def exponential_variogram_model(m, d): """Exponential model, m is [psill, range, nugget]""" psill = float(m[0]) range_ = float(m[1]) nugget = float(m[2]) return psill * (1.0 - np.exp(-d / (range_ / 3.0))) + nugget def spherical_variogram_model(m, d): """Spherical model, m is [psill, range, nugget]""" psill = float(m[0]) range_ = float(m[1]) nugget = float(m[2]) return np.piecewise( d, [d <= range_, d > range_], [ lambda x: psill * ((3.0 * x) / (2.0 * range_) - (x**3.0) / (2.0 * range_**3.0)) + nugget, psill + nugget, ], ) def hole_effect_variogram_model(m, d): """Hole Effect model, m is [psill, range, nugget]""" psill = float(m[0]) range_ = float(m[1]) nugget = float(m[2]) return ( psill * (1.0 - (1.0 - d / (range_ / 3.0)) * np.exp(-d / (range_ / 3.0))) + nugget )
2,092
24.52439
83
py
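All of the bounded models above share the [psill, range, nugget] parameter convention, with the full sill equal to psill + nugget (linear and power are unbounded). A quick sketch evaluating two of them directly, using made-up parameter values and lag distances:

import numpy as np
from pykrige.variogram_models import (
    exponential_variogram_model,
    spherical_variogram_model,
)

d = np.linspace(0.0, 20.0, 5)        # lag distances (made-up values)
params = [2.0, 10.0, 0.5]            # [psill, range, nugget] (made-up values)

print(exponential_variogram_model(params, d))  # climbs toward psill + nugget = 2.5
print(spherical_variogram_model(params, d))    # exactly 2.5 at and beyond range = 10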
PyKrige
PyKrige-main/src/pykrige/ck.py
"""Classification Kriging.""" import numpy as np from pykrige.compat import Krige, check_sklearn_model, validate_sklearn validate_sklearn() from scipy.linalg import helmert from sklearn.metrics import accuracy_score from sklearn.preprocessing import OneHotEncoder from sklearn.svm import SVC class ClassificationKriging: """ An implementation of Simplicial Indicator Kriging applied to classification ilr transformed residuals. Parameters ---------- classification_model: machine learning model instance from sklearn method: str, optional type of kriging to be performed variogram_model: str, optional variogram model to be used during Kriging n_closest_points: int number of closest points to be used during Ordinary Kriging nlags: int see OK/UK class description weight: bool see OK/UK class description verbose: bool see OK/UK class description exact_values : bool see OK/UK class description variogram_parameters : list or dict see OK/UK class description variogram_function : callable see OK/UK class description anisotropy_scaling : tuple single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D) anisotropy_angle : tuple single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D) enable_statistics : bool see OK class description coordinates_type : str see OK/UK class description drift_terms : list of strings see UK/UK3D class description point_drift : array_like see UK class description ext_drift_grid : tuple Holding the three values external_drift, external_drift_x and external_drift_z for the UK class functional_drift : list of callable see UK/UK3D class description """ def __init__( self, classification_model=SVC(), method="ordinary", variogram_model="linear", n_closest_points=10, nlags=6, weight=False, verbose=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", variogram_parameters=None, variogram_function=None, anisotropy_scaling=(1.0, 1.0), anisotropy_angle=(0.0, 0.0, 0.0), enable_statistics=False, coordinates_type="euclidean", drift_terms=None, point_drift=None, ext_drift_grid=(None, None, None), functional_drift=None, ): check_sklearn_model(classification_model, task="classification") self.classification_model = classification_model self.n_closest_points = n_closest_points self._kriging_kwargs = dict( method=method, variogram_model=variogram_model, nlags=nlags, weight=weight, n_closest_points=n_closest_points, verbose=verbose, exact_values=exact_values, pseudo_inv=pseudo_inv, pseudo_inv_type=pseudo_inv_type, variogram_parameters=variogram_parameters, variogram_function=variogram_function, anisotropy_scaling=anisotropy_scaling, anisotropy_angle=anisotropy_angle, enable_statistics=enable_statistics, coordinates_type=coordinates_type, drift_terms=drift_terms, point_drift=point_drift, ext_drift_grid=ext_drift_grid, functional_drift=functional_drift, ) def fit(self, p, x, y): """ Fit the classification method and also krige the residual. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for classification x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example 2d classification kriging. 
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, ) """ self.classification_model.fit(p, y.ravel()) print("Finished learning classification model") self.classes_ = self.classification_model.classes_ self.krige = [] for i in range(len(self.classes_) - 1): self.krige.append(Krige(**self._kriging_kwargs)) ml_pred = self.classification_model.predict_proba(p) ml_pred_ilr = ilr_transformation(ml_pred) self.onehotencode = OneHotEncoder(categories=[self.classes_]) y_ohe = np.array(self.onehotencode.fit_transform(y).todense()) y_ohe_ilr = ilr_transformation(y_ohe) for i in range(len(self.classes_) - 1): self.krige[i].fit(x=x, y=y_ohe_ilr[:, i] - ml_pred_ilr[:, i]) print("Finished kriging residuals") def predict(self, p, x, **kwargs): """ Predict. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for classification x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- pred: ndarray The expected value of ys for the query inputs, of shape (Ns,). """ ml_pred = self.classification_model.predict_proba(p) ml_pred_ilr = ilr_transformation(ml_pred) pred_proba_ilr = self.krige_residual(x, **kwargs) + ml_pred_ilr pred_proba = inverse_ilr_transformation(pred_proba_ilr) return np.argmax(pred_proba, axis=1) def krige_residual(self, x, **kwargs): """ Calculate the residuals. Parameters ---------- x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. Returns ------- residual: ndarray kriged residual values """ krig_pred = [ self.krige[i].predict(x=x, **kwargs) for i in range(len(self.classes_) - 1) ] return np.vstack(krig_pred).T def score(self, p, x, y, sample_weight=None, **kwargs): """ Overloading default classification score method. Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for classification x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, ) """ return accuracy_score( y_pred=self.predict(p, x, **kwargs), y_true=y, sample_weight=sample_weight ) def closure(data, k=1.0): """Apply closure to data, sample-wise. Adapted from https://github.com/ofgulban/compoda. Parameters ---------- data : 2d numpy array, shape [n_samples, n_measurements] Data to be closed to a certain constant. Do not forget to deal with zeros in the data before this operation. k : float, positive Sum of the measurements will be equal to this number. Returns ------- data : 2d numpy array, shape [n_samples, n_measurements] Closed data. Reference --------- [1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R. (2015). Modelling and Analysis of Compositional Data, pg. 9. Chichester, UK: John Wiley & Sons, Ltd. DOI: 10.1002/9781119003144 """ return k * data / np.sum(data, axis=1)[:, np.newaxis] def ilr_transformation(data): """Isometric logratio transformation (not vectorized). Adapted from https://github.com/ofgulban/compoda. Parameters ---------- data : 2d numpy array, shape [n_samples, n_coordinates] Barycentric coordinates (closed) in simplex space. Returns ------- out : 2d numpy array, shape [n_samples, n_coordinates-1] Coordinates in real space. Reference --------- [1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R. (2015). 
Modelling and Analysis of Compositional Data, pg. 37. Chichester, UK: John Wiley & Sons, Ltd. DOI: 10.1002/9781119003144 """ data = np.maximum(data, np.finfo(float).eps) return np.einsum("ij,jk->ik", np.log(data), -helmert(data.shape[1]).T) def inverse_ilr_transformation(data): """Inverse isometric logratio transformation (not vectorized). Adapted from https://github.com/ofgulban/compoda. Parameters ---------- data : 2d numpy array, shape [n_samples, n_coordinates] Isometric log-ratio transformed coordinates in real space. Returns ------- out : 2d numpy array, shape [n_samples, n_coordinates+1] Barycentric coordinates (closed) in simplex space. Reference --------- [1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R. (2015). Modelling and Analysis of Compositional Data, pg. 37. Chichester, UK: John Wiley & Sons, Ltd. DOI: 10.1002/9781119003144 """ return closure(np.exp(np.einsum("ij,jk->ik", data, -helmert(data.shape[1] + 1))))
9,458
31.393836
106
py
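A minimal sketch of how the ClassificationKriging class above might be used. Because fit() one-hot encodes y, the targets are passed as a column vector of shape (Ns, 1). The synthetic data, the label construction, and the RandomForestClassifier base model are illustrative assumptions only.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from pykrige.ck import ClassificationKriging

# Synthetic data: 2 predictor variables, (lon, lat) coordinates, 3 classes.
rng = np.random.default_rng(0)
p = rng.normal(size=(300, 2))
x = rng.uniform(0.0, 1.0, size=(300, 2))
y = np.digitize(p[:, 0] + x[:, 1], bins=[-0.5, 0.5]).reshape(-1, 1)  # labels 0, 1, 2

p_train, p_test, x_train, x_test, y_train, y_test = train_test_split(
    p, x, y, test_size=0.3, random_state=0
)

m_ck = ClassificationKriging(
    classification_model=RandomForestClassifier(n_estimators=50), n_closest_points=10
)
m_ck.fit(p_train, x_train, y_train)   # fits the classifier, then kriges ilr residuals
print("held-out accuracy:", m_ck.score(p_test, x_test, y_test))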
PyKrige
PyKrige-main/src/pykrige/compat.py
# pylint: disable= invalid-name, unused-import """For compatibility.""" from pykrige.ok import OrdinaryKriging from pykrige.ok3d import OrdinaryKriging3D from pykrige.uk import UniversalKriging from pykrige.uk3d import UniversalKriging3D # sklearn try: # keep train_test_split here for backward compatibility from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin from sklearn.model_selection import train_test_split SKLEARN_INSTALLED = True except ImportError: SKLEARN_INSTALLED = False train_test_split = None class RegressorMixin: """Mock RegressorMixin.""" class ClassifierMixin: """Mock ClassifierMixin.""" class BaseEstimator: """Mock BaseEstimator.""" krige_methods = { "ordinary": OrdinaryKriging, "universal": UniversalKriging, "ordinary3d": OrdinaryKriging3D, "universal3d": UniversalKriging3D, } threed_krige = ("ordinary3d", "universal3d") krige_methods_kws = { "ordinary": [ "anisotropy_scaling", "anisotropy_angle", "enable_statistics", "coordinates_type", ], "universal": [ "anisotropy_scaling", "anisotropy_angle", "drift_terms", "point_drift", "external_drift", "external_drift_x", "external_drift_y", "functional_drift", ], "ordinary3d": [ "anisotropy_scaling_y", "anisotropy_scaling_z", "anisotropy_angle_x", "anisotropy_angle_y", "anisotropy_angle_z", ], "universal3d": [ "anisotropy_scaling_y", "anisotropy_scaling_z", "anisotropy_angle_x", "anisotropy_angle_y", "anisotropy_angle_z", "drift_terms", "functional_drift", ], } class SklearnException(Exception): """Exception for missing scikit-learn.""" def validate_method(method): """Validate the kriging method in use.""" if method not in krige_methods.keys(): raise ValueError( "Kriging method must be one of {}".format(krige_methods.keys()) ) def validate_sklearn(): """Validate presence of scikit-learn.""" if not SKLEARN_INSTALLED: raise SklearnException( "sklearn needs to be installed in order to use this module" ) class Krige(RegressorMixin, BaseEstimator): """ A scikit-learn wrapper class for Ordinary and Universal Kriging. This works with both Grid/RandomSearchCv for finding the best Krige parameters combination for a problem. 
Parameters ---------- method: str, optional type of kriging to be performed variogram_model: str, optional variogram model to be used during Kriging nlags: int see OK/UK class description weight: bool see OK/UK class description n_closest_points: int number of closest points to be used during Ordinary Kriging verbose: bool see OK/UK class description exact_values : bool see OK/UK class description variogram_parameters : list or dict see OK/UK class description variogram_function : callable see OK/UK class description anisotropy_scaling : tuple single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D) anisotropy_angle : tuple single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D) enable_statistics : bool see OK class description coordinates_type : str see OK/UK class description drift_terms : list of strings see UK/UK3D class description point_drift : array_like see UK class description ext_drift_grid : tuple Holding the three values external_drift, external_drift_x and external_drift_z for the UK class functional_drift : list of callable see UK/UK3D class description """ def __init__( self, method="ordinary", variogram_model="linear", nlags=6, weight=False, n_closest_points=10, verbose=False, exact_values=True, pseudo_inv=False, pseudo_inv_type="pinv", variogram_parameters=None, variogram_function=None, anisotropy_scaling=(1.0, 1.0), anisotropy_angle=(0.0, 0.0, 0.0), enable_statistics=False, coordinates_type="euclidean", drift_terms=None, point_drift=None, ext_drift_grid=(None, None, None), functional_drift=None, ): validate_method(method) self.variogram_model = variogram_model self.variogram_parameters = variogram_parameters self.variogram_function = variogram_function self.nlags = nlags self.weight = weight self.verbose = verbose self.exact_values = exact_values self.pseudo_inv = pseudo_inv self.pseudo_inv_type = pseudo_inv_type self.anisotropy_scaling = anisotropy_scaling self.anisotropy_angle = anisotropy_angle self.enable_statistics = enable_statistics self.coordinates_type = coordinates_type self.drift_terms = drift_terms self.point_drift = point_drift self.ext_drift_grid = ext_drift_grid self.functional_drift = functional_drift self.model = None # not trained self.n_closest_points = n_closest_points self.method = method def fit(self, x, y, *args, **kwargs): """ Fit the current model. 
Parameters ---------- x: ndarray array of Points, (x, y) pairs of shape (N, 2) for 2d kriging array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (N, ) """ val_kw = "val" if self.method in threed_krige else "z" setup = dict( variogram_model=self.variogram_model, variogram_parameters=self.variogram_parameters, variogram_function=self.variogram_function, nlags=self.nlags, weight=self.weight, verbose=self.verbose, exact_values=self.exact_values, pseudo_inv=self.pseudo_inv, pseudo_inv_type=self.pseudo_inv_type, ) add_setup = dict( anisotropy_scaling=self.anisotropy_scaling[0], anisotropy_angle=self.anisotropy_angle[0], enable_statistics=self.enable_statistics, coordinates_type=self.coordinates_type, anisotropy_scaling_y=self.anisotropy_scaling[0], anisotropy_scaling_z=self.anisotropy_scaling[1], anisotropy_angle_x=self.anisotropy_angle[0], anisotropy_angle_y=self.anisotropy_angle[1], anisotropy_angle_z=self.anisotropy_angle[2], drift_terms=self.drift_terms, point_drift=self.point_drift, external_drift=self.ext_drift_grid[0], external_drift_x=self.ext_drift_grid[1], external_drift_y=self.ext_drift_grid[2], functional_drift=self.functional_drift, ) for kw in krige_methods_kws[self.method]: setup[kw] = add_setup[kw] input_kw = self._dimensionality_check(x) input_kw.update(setup) input_kw[val_kw] = y self.model = krige_methods[self.method](**input_kw) def _dimensionality_check(self, x, ext=""): if self.method in ("ordinary", "universal"): if x.shape[1] != 2: raise ValueError("2d krige can use only 2d points") else: return {"x" + ext: x[:, 0], "y" + ext: x[:, 1]} if self.method in ("ordinary3d", "universal3d"): if x.shape[1] != 3: raise ValueError("3d krige can use only 3d points") else: return { "x" + ext: x[:, 0], "y" + ext: x[:, 1], "z" + ext: x[:, 2], } def predict(self, x, *args, **kwargs): """ Predict. Parameters ---------- x: ndarray array of Points, (x, y) pairs of shape (N, 2) for 2d kriging array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- Prediction array """ if not self.model: raise Exception("Not trained. Train first") points = self._dimensionality_check(x, ext="points") return self.execute(points, *args, **kwargs)[0] def execute(self, points, *args, **kwargs): # TODO array of Points, (x, y) pairs of shape (N, 2) """ Execute. Parameters ---------- points: dict Returns ------- Prediction array Variance array """ default_kw = dict(style="points", backend="loop") default_kw.update(kwargs) points.update(default_kw) if isinstance(self.model, (OrdinaryKriging, OrdinaryKriging3D)): points.update(dict(n_closest_points=self.n_closest_points)) else: print("n_closest_points will be ignored for UniversalKriging") prediction, variance = self.model.execute(**points) return prediction, variance def check_sklearn_model(model, task="regression"): """Check the sklearn method in use.""" if task == "regression": if not (isinstance(model, BaseEstimator) and isinstance(model, RegressorMixin)): raise RuntimeError( "Needs to supply an instance of a scikit-learn regression class." ) elif task == "classification": if not ( isinstance(model, BaseEstimator) and isinstance(model, ClassifierMixin) ): raise RuntimeError( "Needs to supply an instance of a scikit-learn classification class." )
9,889
31.11039
88
py
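Because Krige above exposes the kriging options through a scikit-learn estimator interface, it can be dropped straight into GridSearchCV, which is the use case its docstring describes. A minimal sketch with synthetic 2D coordinates and targets; the parameter grid values and the scoring choice are arbitrary examples.

import numpy as np
from sklearn.model_selection import GridSearchCV
from pykrige.compat import Krige

# Synthetic 2D coordinates (N, 2) and targets (N,).
rng = np.random.default_rng(1)
X = rng.uniform(0.0, 1.0, size=(100, 2))
y = np.sin(4.0 * X[:, 0]) + 0.1 * rng.normal(size=100)

param_grid = {
    "method": ["ordinary", "universal"],
    "variogram_model": ["linear", "spherical"],
    "nlags": [4, 6],
}

search = GridSearchCV(Krige(), param_grid, cv=3, scoring="neg_mean_squared_error")
search.fit(X, y)
print("best parameters:", search.best_params_)
print("best CV score:", search.best_score_)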