Dataset schema — each row below carries four string columns:
  code      (file contents, 1 to 5.19M characters)
  package   (package name, 1 to 81 characters)
  path      (path inside the wheel, 9 to 304 characters)
  filename  (file name, 4 to 145 characters)
########## IMPORT ########## from AAM.plots_class import _plots import pickle ########## CONVENTIONAL ARCHETYPAL ANALYSIS RESULT ########## class _CAA_result: plots = _plots() def __init__(self, A, B, X, X_hat, n_iter, RSS, Z, K, p, time, columns,type, with_synthetic_data = False): self.A = A self.B = B self.X = X self.X_hat = X_hat self.n_iter = len(RSS) self.loss = RSS self.Z = Z self.K = K self.p = p self.time = time self.columns = columns self.type = type self.with_synthetic_data = with_synthetic_data self.N = len(self.X[0,:]) def _print(self): if self.type == "CAA": type_name = "Conventional Archetypal" else: type_name = "Two Step Archetypal" print("/////////////// INFORMATION ABOUT " + type_name.upper() + " ANALYSIS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\") print(f"▣ The " + type_name + " Analysis was computed using " + str(self.K) + " archetypes.") print(f"▣ The " + type_name + " Analysis was computed on " + str(len(self.X)) + " attributes.") print(f"▣ The " + type_name + " Analysis was computed on " + str(self.N) + " subjects.") print(f"▣ The " + type_name + " Analysis ran for " + str(self.n_iter) + " iterations.") print(f"▣ The " + type_name + " Analysis took " + str(self.time) + " seconds to complete.") print(f"▣ The final RSS was: {self.loss[-1]}.") def _plot(self,plot_type, attributes, archetype_number, types, weighted, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title): if plot_type == "PCA_scatter_plot": self.plots._PCA_scatter_plot(self.Z,self.X_hat,self.type, save_figure, filename,title) elif plot_type == "attribute_scatter_plot": self.plots._attribute_scatter_plot(self.Z,self.X_hat,attributes,self.type,self.p, save_figure, filename,title) elif plot_type == "loss_plot": self.plots._loss_plot(self.loss,self.type, save_figure, filename,title) elif plot_type == "mixture_plot": self.plots._mixture_plot(self.Z,self.A,self.type, save_figure, filename,title) elif plot_type == "barplot": self.plots._barplot(self.Z,self.columns,archetype_number,self.type, self.p, save_figure, filename,title) elif plot_type == "barplot_all": self.plots._barplot_all(self.Z,self.columns, self.type, self.p, save_figure, filename,title) elif plot_type == "typal_plot": self.plots._typal_plot(self.Z,types,weighted, save_figure, filename,title) elif plot_type == "pie_chart": self.plots._pie_chart(self.A, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title) elif plot_type == "attribute_distribution": self.plots._attribute_distribution(self.A,self.Z,subject_indexes,self.columns,self.p,self.type, attribute_indexes, archetype_dataframe, save_figure, filename,title) elif plot_type == "circular_typal_barplot": self.plots._circular_typal_barplot(self.type,self.Z, types, archetype_number,self.columns,self.p, save_figure, filename, title) def _save(self,filename): if not self.with_synthetic_data: file = open("results/" + self.type + "_" + filename + '.obj','wb') else: file = open("synthetic_results/" + self.type + "_" + filename + '.obj','wb') pickle.dump(self, file) file.close() ########## ORDINAL ARCHETYPAL ANALYSIS RESULT ########## class _OAA_result: plots = _plots() def __init__(self, A, B, X, n_iter, b, Z, X_tilde, Z_tilde, X_hat, loss, K, p, time, columns,type,sigma, with_synthetic_data = False): self.A = A self.B = B self.X = X self.n_iter = len(loss) self.b = b self.sigma = sigma self.X_tilde = X_tilde self.Z_tilde = Z_tilde self.X_hat = X_hat self.loss = loss self.Z = Z self.K = K self.p = p self.time = time self.columns = columns self.type = 
type self.with_synthetic_data = with_synthetic_data self.N = len(self.X[0,:]) def _print(self): if self.type == "RBOAA": type_name = "Response Bias Ordinal Archetypal" else: type_name = "Ordinal Archetypal" print("/////////////// INFORMATION ABOUT " + type_name.upper() + " ANALYSIS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\") print(f"▣ The " + type_name + " Analysis was computed using " + str(self.K) + " archetypes.") print(f"▣ The " + type_name + " Analysis was computed on " + str(len(self.X)) + " attributes.") print(f"▣ The " + type_name + " Analysis was computed on " + str(self.N) + " subjects.") print(f"▣ The " + type_name + " Analysis ran for " + str(self.n_iter) + " iterations.") print(f"▣ The " + type_name + " Analysis took " + str(self.time) + " seconds to complete.") print(f"▣ The final loss was: {self.loss[-1]}.") def _plot(self,plot_type, attributes, archetype_number, types, weighted, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title): if plot_type == "PCA_scatter_plot": self.plots._PCA_scatter_plot(self.Z,self.X_hat,self.type, save_figure, filename,title) elif plot_type == "attribute_scatter_plot": self.plots._attribute_scatter_plot(self.Z,self.X_hat,attributes,self.type, self.p, save_figure, filename,title) elif plot_type == "loss_plot": self.plots._loss_plot(self.loss,self.type, save_figure, filename,title) elif plot_type == "mixture_plot": self.plots._mixture_plot(self.Z,self.A,self.type, save_figure, filename,title) elif plot_type == "barplot": self.plots._barplot(self.Z,self.columns,archetype_number,self.type,self.p, save_figure, filename,title) elif plot_type == "barplot_all": self.plots._barplot_all(self.Z,self.columns, self.type, self.p, save_figure, filename,title) elif plot_type == "typal_plot": self.plots._typal_plot(self.Z,types,weighted, save_figure, filename,title) elif plot_type == "pie_chart": self.plots._pie_chart(self.A,subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title) elif plot_type == "attribute_distribution": self.plots._attribute_distribution(self.A,self.Z,subject_indexes,self.columns,self.p,self.type,attribute_indexes, archetype_dataframe, save_figure, filename,title) elif plot_type == "circular_typal_barplot": self.plots._circular_typal_barplot(self.type,self.Z, types, archetype_number,self.columns,self.p,save_figure, filename, title) def _save(self,filename): if not self.with_synthetic_data: file = open("results/" + self.type + "_" + filename + '.obj','wb') else: file = open("synthetic_results/" + self.type + "_" + filename + '.obj','wb') pickle.dump(self, file) file.close()
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/AA_result_class.py
AA_result_class.py
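The `_save` methods above pickle the entire result object to `results/<type>_<filename>.obj` (or `synthetic_results/...` for synthetic runs). A minimal sketch of loading such a result back for inspection, assuming a CAA run was previously saved under the hypothetical name "demo":

import pickle

# Re-load a result object written by _CAA_result._save("demo") — the file name is hypothetical.
with open("results/CAA_demo.obj", "rb") as f:
    result = pickle.load(f)

result._print()          # prints archetype count, attributes, subjects, iterations, final RSS
print(result.Z.shape)    # archetype matrix Z (attributes x archetypes)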
########## IMPORTS ########## import numpy as np import torch import torch.nn as nn import torch.optim as optim from timeit import default_timer as timer from AAM.AA_result_class import _OAA_result from AAM.loading_bar_class import _loading_bar ########## ORDINAL ARCHETYPAL ANALYSIS CLASS ########## class _OAA: ########## HELPER FUNCTION // EARLY STOPPING ########## def _early_stopping(self): next_imp = self.loss[-round(len(self.loss)/100)]-self.loss[-1] prev_imp = (self.loss[0]-self.loss[-1])*1e-5 return next_imp < prev_imp ########## HELPER FUNCTION // A AND B ########## def _apply_constraints_AB(self,A): m = nn.Softmax(dim=1) return m(A) ########## HELPER FUNCTION // BETAS ########## def _apply_constraints_beta(self,b): m = nn.Softmax(dim=0) return torch.cumsum(m(b), dim=0)[:len(b)-1] ########## HELPER FUNCTION // SIGMA ########## def _apply_constraints_sigma(self,sigma): m = nn.Softplus() return m(sigma) ########## HELPER FUNCTION // ALPHA ########## def _calculate_alpha(self,b): b_j = torch.cat((torch.tensor([0.0]),b),0) b_j_plus1 = torch.cat((b,torch.tensor([1.0])),0) alphas = (b_j_plus1+b_j)/2 return alphas ########## HELPER FUNCTION // X_tilde ########## def _calculate_X_tilde(self,X,alphas): X_tilde = alphas[X.long()-1] return X_tilde ########## HELPER FUNCTION // X_hat ########## def _calculate_X_hat(self,X_tilde,A,B): Z = B @ X_tilde X_hat = A @ Z return X_hat ########## HELPER FUNCTION // LOSS ########## def _calculate_loss(self,Xt, X_hat, b, sigma): pad = nn.ConstantPad1d(1, 0) b = pad(b) b[-1] = 1.0 z_next = (b[Xt] - X_hat)/sigma z_prev = (b[Xt-1] - X_hat)/sigma z_next[Xt == len(b)+1] = np.inf z_prev[Xt == 1] = -np.inf P_next = torch.distributions.normal.Normal(0, 1).cdf(z_next) P_prev = torch.distributions.normal.Normal(0, 1).cdf(z_prev) neg_logP = -torch.log(( P_next - P_prev ) +1e-10) loss = torch.sum(neg_logP) return loss ########## HELPER FUNCTION // ERROR ########## def _error(self,Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint): A = self._apply_constraints_AB(A_non_constraint) B = self._apply_constraints_AB(B_non_constraint) b = self._apply_constraints_beta(b_non_constraint) sigma = self._apply_constraints_sigma(sigma_non_constraint) alphas = self._calculate_alpha(b) X_tilde = self._calculate_X_tilde(Xt,alphas) X_hat = self._calculate_X_hat(X_tilde,A,B) loss = self._calculate_loss(Xt, X_hat, b, sigma) return loss ########## COMPUTE ARCHETYPES FUNCTION OF OAA ########## def _compute_archetypes( self, X, K, p, n_iter, lr, mute, columns, with_synthetic_data = False, early_stopping = False, for_hotstart_usage = False): ########## INITIALIZATION ########## self.N, self.M = len(X.T), len(X.T[0,:]) Xt = torch.tensor(X.T, dtype = torch.long) self.loss = [] start = timer() A_non_constraint = torch.autograd.Variable(torch.randn(self.N, K), requires_grad=True) B_non_constraint = torch.autograd.Variable(torch.randn(K, self.N), requires_grad=True) b_non_constraint = torch.autograd.Variable(torch.rand(p), requires_grad=True) sigma_non_constraint = torch.autograd.Variable(torch.rand(1), requires_grad=True) optimizer = optim.Adam([A_non_constraint, B_non_constraint, b_non_constraint, sigma_non_constraint], amsgrad = True, lr = lr) if not mute: loading_bar = _loading_bar(n_iter, "Ordinal Archetypal Analysis") ########## ANALYSIS ########## for i in range(n_iter): if not mute: loading_bar._update() optimizer.zero_grad() L = self._error(Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint) self.loss.append(L.detach().numpy()) L.backward() 
optimizer.step() ########## EARLY STOPPING ########## if i % 25 == 0 and early_stopping: if len(self.loss) > 200 and self._early_stopping(): if not mute: loading_bar._kill() print("Analysis ended due to early stopping.\n") break ########## POST ANALYSIS ########## A_f = self._apply_constraints_AB(A_non_constraint).detach().numpy() B_f = self._apply_constraints_AB(B_non_constraint).detach().numpy() b_f = self._apply_constraints_beta(b_non_constraint) alphas_f = self._calculate_alpha(b_f) X_tilde_f = self._calculate_X_tilde(Xt,alphas_f).detach().numpy() Z_tilde_f = (self._apply_constraints_AB(B_non_constraint).detach().numpy() @ X_tilde_f) sigma_f = self._apply_constraints_sigma(sigma_non_constraint).detach().numpy() X_hat_f = self._calculate_X_hat(X_tilde_f,A_f,B_f) end = timer() time = round(end-start,2) Z_f = B_f @ X_tilde_f ########## CREATE RESULT INSTANCE ########## result = _OAA_result( A_f.T, B_f.T, X, n_iter, b_f.detach().numpy(), Z_f.T, X_tilde_f.T, Z_tilde_f.T, X_hat_f.T, self.loss, K, p, time, columns, "OAA", sigma_f, with_synthetic_data=with_synthetic_data) if not mute: result._print() ########## RETURN RESULT IF REGULAR, RETURN MATRICIES IF HOTSTART USAGE ########## if not for_hotstart_usage: return result else: A_non_constraint_np = A_non_constraint.detach().numpy() B_non_constraint_np = B_non_constraint.detach().numpy() sigma_non_constraint_np = sigma_non_constraint.detach().numpy() b_non_constraint_np = b_non_constraint.detach().numpy() return A_non_constraint_np, B_non_constraint_np, sigma_non_constraint_np, b_non_constraint_np
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/OAA_class.py
OAA_class.py
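To make the objective above concrete, here is a small standalone toy illustration of the cumulative-probit likelihood that `_calculate_loss` evaluates: each observed category x contributes -log(Phi((b[x] - x_hat)/sigma) - Phi((b[x-1] - x_hat)/sigma)), with the lowest and highest categories unbounded below and above. All numbers here are made up for illustration; only torch is required.

import torch

b = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])   # ordered boundaries for p = 4 categories (0 and 1 added by the constraints)
x = torch.tensor([1, 3, 4])                      # observed ordinal answers (1-based)
x_hat = torch.tensor([0.10, 0.55, 0.90])         # reconstructed latent values, i.e. rows of A @ Z
sigma = 0.2

normal = torch.distributions.Normal(0.0, 1.0)
z_next = (b[x] - x_hat) / sigma
z_prev = (b[x - 1] - x_hat) / sigma
z_prev[x == 1] = -float('inf')                   # lowest category has no lower boundary
z_next[x == len(b) - 1] = float('inf')           # highest category has no upper boundary
neg_logP = -torch.log(normal.cdf(z_next) - normal.cdf(z_prev) + 1e-10)
print(neg_logP.sum())                            # the quantity _OAA minimises over A, B, b and sigma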
########## IMPORTS ##########
import torch
import torch.nn as nn
import torch.optim as optim
from timeit import default_timer as timer

from AAM.loading_bar_class import _loading_bar
from AAM.AA_result_class import _CAA_result


########## CONVENTIONAL ARCHETYPAL ANALYSIS CLASS ##########
class _CAA:

    ########## HELPER FUNCTION // EARLY STOPPING ##########
    def _early_stopping(self):
        next_imp = self.RSS[-round(len(self.RSS) / 100)] - self.RSS[-1]
        prev_imp = (self.RSS[0] - self.RSS[-1]) * 1e-5
        return next_imp < prev_imp

    ########## HELPER FUNCTION // CALCULATE ERROR FOR EACH ITERATION ##########
    def _error(self, X, B, A):
        return torch.norm(X - X @ B @ A, p='fro')**2

    ########## HELPER FUNCTION // A CONSTRAINTS ##########
    def _apply_constraints(self, A):
        m = nn.Softmax(dim=0)
        return m(A)

    ########## COMPUTE ARCHETYPES FUNCTION OF CAA ##########
    def _compute_archetypes(self, X, K, p, n_iter, lr, mute, columns,
                            with_synthetic_data=False, early_stopping=False, for_hotstart_usage=False):

        ########## INITIALIZATION ##########
        self.RSS = []
        start = timer()
        if not mute:
            loading_bar = _loading_bar(n_iter, "Conventional Archetypal Analysis")
        N, _ = X.T.shape
        Xt = torch.tensor(X, requires_grad=False).float()
        A = torch.autograd.Variable(torch.rand(K, N), requires_grad=True)
        B = torch.autograd.Variable(torch.rand(N, K), requires_grad=True)
        optimizer = optim.Adam([A, B], amsgrad=True, lr=lr)

        ########## ANALYSIS ##########
        for i in range(n_iter):
            if not mute:
                loading_bar._update()
            optimizer.zero_grad()
            L = self._error(Xt, self._apply_constraints(B), self._apply_constraints(A))
            self.RSS.append(L.detach().numpy())
            L.backward()
            optimizer.step()

            ########## EARLY STOPPING ##########
            if i % 25 == 0 and early_stopping:
                if len(self.RSS) > 200 and self._early_stopping():
                    if not mute:
                        loading_bar._kill()
                        print("Analysis ended due to early stopping.\n")
                    break

        ########## POST ANALYSIS ##########
        A_f = self._apply_constraints(A).detach().numpy()
        B_f = self._apply_constraints(B).detach().numpy()
        Z_f = (Xt @ self._apply_constraints(B)).detach().numpy()
        X_hat_f = X @ B_f @ A_f
        end = timer()
        time = round(end - start, 2)
        result = _CAA_result(
            A_f, B_f, X, X_hat_f, n_iter, self.RSS, Z_f, K, p, time, columns, "CAA",
            with_synthetic_data=with_synthetic_data)
        if not mute:
            result._print()

        if not for_hotstart_usage:
            return result
        else:
            return A.detach().numpy(), B.detach().numpy()
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/CAA_class.py
CAA_class.py
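A minimal usage sketch of the solver above, calling `_compute_archetypes` directly on toy data (in normal use these internals are presumably wrapped by the package's public interface). The shape convention, taken from `_CAA_result`, is attributes in rows and subjects in columns; the matrix and parameter values below are illustrative only.

import numpy as np
from AAM.CAA_class import _CAA

X = np.random.rand(10, 200)                    # 10 attributes, 200 subjects (toy data)
columns = [f"SQ{i}" for i in range(1, 11)]     # one label per attribute

result = _CAA()._compute_archetypes(
    X, K=3, p=5, n_iter=2000, lr=0.01, mute=False,
    columns=columns, early_stopping=True)

print(result.loss[-1])                         # final RSS (stored on the result as .loss)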
from timeit import default_timer as timer


class _loading_bar:

    def __init__(self, count, headline):
        self.count = count
        self.current = 0
        self.skip = 0
        self.start = timer()
        print("\n{0} in progress...".format(headline))

    def _update(self, loss=None):
        self.current += 1
        if self.current == self.count:
            print("\r{0}".format(f"Finished Successfully after {round(timer()-self.start,1)} seconds! \n"))
        elif self.skip > self.count / 133:
            remaining = (self.count / self.current) * (timer() - self.start) - (timer() - self.start)
            if remaining > 60:
                remaining = f"{round(remaining/60,1)} minutes remaining"
            else:
                remaining = f"{round(remaining,1)} seconds remaining"
            filled = int(round(self.current / self.count, 1) * 20)
            progress = round((self.current / self.count) * 100, 2)
            if loss is not None:
                print("\r{0}".format("|{0}{1}| {2}% finished // {3} (Loss: {4})".format(
                    "█" * filled, "-" * (20 - filled), progress, remaining, round(loss))), end="", flush=True)
            else:
                # Without this else, the loss-free bar would immediately overwrite the line above.
                print("\r{0}".format("|{0}{1}| {2}% finished // {3}".format(
                    "█" * filled, "-" * (20 - filled), progress, remaining)), end="", flush=True)
            self.skip = 0
        else:
            self.skip += 1

    def _kill(self):
        print("\r{0}".format(f"Finished Successfully after {round(timer()-self.start,1)} seconds! \n"))
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/loading_bar_class.py
loading_bar_class.py
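A tiny usage sketch of the progress bar above; the loop and the loss values are hypothetical stand-ins for an optimisation routine.

from AAM.loading_bar_class import _loading_bar

bar = _loading_bar(1000, "Demo computation")
for i in range(1000):
    # ... one optimisation step would go here ...
    bar._update(loss=float(1000 - i))    # loss is optional; when given it is shown next to the bar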
import numpy as np from scipy.stats import norm import pickle ########## CLASS FOR CREATING SYNTHETIC DATA ########## class _synthetic_data: ########## INITIALIZATION - CREATES THE DATA ########## def __init__(self, N, M ,K, p, sigma, rb, a_param, b_param, sigma_std = 0): self.N = N self.M = M self.K = K self.p = p self.columns = ["SQ"+str(i) for i in range(1, M+1)] self.X, self.Z, self.Z_alpha, self.A, self.betas = self.X(N=N, M=M, K=K, p=p, sigma=sigma, rb=rb, a_param=a_param, b_param=b_param, sigma_std = sigma_std) ########## IF THERE IS RESPONSE BIAS IN THE DATA ########## def biasedBetas(self, N, p, b_param): b = np.array([b_param]*p) return np.random.dirichlet(b, size=N) ########## CONTSTRAINTS ON THE RESPONSE BIAS BETAS ########## def betaConstraintsBias(self, betas): N, J = betas.shape new_betas = np.empty((N,J)) denoms = np.sum(betas,axis=1) for i in range(N): for j in range(J): new_betas[i,j] = np.sum(betas[i,:j+1])/denoms[i] # Return and remove the column of ones return new_betas[:,:-1] ########## CONSTRAINTS ON THE NON RESOPNS BIAS BETAS ########## def betaConstraints(self, betas): new_betas = np.empty(len(betas)) denom = sum(betas) for i in range(len(new_betas)): new_betas[i] = np.sum(betas[:i+1]) / denom return new_betas[:-1] ########## SOFTPLUS HELPER FUNCTION ########## def softplus(self, sigma, sigma_std): if sigma_std == 0: return np.log(1 + np.exp(sigma)) else: sigmas = [] for n in range(self.N): sigmas.append(np.log(1 + np.exp(sigma + np.random.uniform(-1,1,1)*sigma_std))) sigmasMatrix = np.repeat(sigmas, self.M, axis=1) return sigmasMatrix ########## HELPER FUNCTION, CALCULATES THE Z ARCEHTYPE MATRIX ########## def get_Z(self, N, M, K, p, rb, b_param): # Ensure reproducibility np.random.seed(42) # Check to ensure that there are no NaN's if b_param < 0.01: b_param = 0.01 betas = np.array([b_param]*p) betas = self.betaConstraints(betas) alphas = np.empty(p) alphas[0] = (0 + betas[0]) / 2 alphas[-1] = (1+ betas[-1]) / 2 for i in range(len(betas)-1): alphas[i+1] = (betas[i] + betas[i+1]) / 2 Z_ordinal = np.ceil(np.random.uniform(0, 1, size = (M,K))*p).astype(int) Z_alpha = alphas[Z_ordinal-1] if rb == True: betas = self.biasedBetas(N=N, p=p, b_param=b_param) betas = self.betaConstraintsBias(betas) return Z_ordinal, Z_alpha, betas ########## HELPER FUNCTION, CALCULATES THE A LINEAZR COMBINATION MATRIX ########## def get_A(self, N, K, a_param): np.random.seed(42) # set another seed :) # Constrain a_param to avoid NaN's if a_param < 0.01: a_param = 0.01 alpha = np.array([a_param]*K) return np.random.dirichlet(alpha, size=N).transpose() ########## HELPER FUNCTION, CALCULATES THE D DENSITY MATRIX ########## def get_D(self, X_rec, betas, sigma, rb): M, N = X_rec.shape if rb == False: J = len(betas) D = np.empty((J+2, M, N)) for j in range(J+2): # Left-most tail if j == 0: D[j] = np.ones((M,N))*(np.inf*(-1)) # Right-most tail elif j == J+1: D[j] = np.ones((M,N))*(np.inf) else: D[j] = (betas[j-1] - X_rec)/(sigma.T+1e-16) ## Add softplus(sigma) else: J = len(betas[0,:]) D = np.empty((J+2, M, N)) # D = torch.rand(len(betas[0,:])+2,M,N) # D[0] = torch.tensor(np.matrix(np.ones((N)) * (-np.inf))) # D[-1] = torch.tensor(np.matrix(np.ones((N)) * (np.inf))) # D[1:-1] = torch.div(torch.unsqueeze(betas.T, 2).repeat(1,1,N)-X_rec.T,torch.unsqueeze(sigma+1e-16, 1).repeat(1,N)) for j in range(J+2): if j == 0: D[j] = np.ones((M,N))*(np.inf*(-1)) elif j == J+1: D[j] = np.ones((M,N))*(np.inf) else: D[j] = (betas[:,j-1] - X_rec)/((sigma.T+1e-16)) ## Add softplus(sigma) # D[j] = torch.div((b[:,j-1] 
- X_hat[:, None]),sigma)[:,0,:].T # print("SHAPEEEE", D.shape) # min = np.min(D[(D > -np.inf) & (D < np.inf)]) # max = np.max(D[(D > -np.inf) & (D < np.inf)]) # D[J+1] = np.ones((M,N))*(max + (max-min)/J) # D[0] = np.ones((M,N))*(min - (max-min)/J) # print("min, max:", min, max) # print("D_0", D[0]) return D - np.mean(D[1:-1]) ########## HELPER FUNCTION, CALCULATES THE PROBABILITY FROM THE DENSITY MATRIX ########## def Probs(self, D): J, M, N = D.shape probs = np.empty((J-1, M, N)) for i in range(J): if i != J-1: probs[i,:,:] = norm.cdf(D[i+1], loc=0, scale=1) - norm.cdf(D[i], loc=0, scale=1) return probs ########## HELPER FUNCTION, SAMPLES FROM PROBABILITY MATRIX TO GET CATEGORICAL ########## def toCategorical(self, probs): categories = np.arange(1, len(probs)+1) J, M, N = probs.shape X_cat = np.empty((M,N)) for m in range(M): for n in range(N): X_cat[m,n] = int(np.random.choice(categories, p = list(probs[:,m,n]))) X_cat = X_cat.astype(int) return X_cat ########## CALUCLATES DATA WITH HELP OF ALL OTHER FUNCTIONS ########## def X(self, M, N, K, p, sigma, rb=False, a_param=1, b_param=100, sigma_std = 0): Z_ordinal, Z_alpha, betas = self.get_Z(N=N,M=M, K=K, p=p, rb=rb, b_param=b_param) A = self.get_A(N, K, a_param=a_param) X_rec = Z_alpha@A D = self.get_D(X_rec, betas, self.softplus(sigma, sigma_std), rb=rb) probs = self.Probs(D) X_final = self.toCategorical(probs) return X_final, Z_ordinal, Z_alpha, A, betas ########## SAVES THE DATA LOCALLY ON PC ########## def _save(self,type,filename): file = open("synthetic_results/" + type + "_" + filename + '_metadata' + '.obj','wb') pickle.dump(self, file) file.close()
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/synthetic_data_class.py
synthetic_data_class.py
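An end-to-end sketch, assuming the AAM package is installed: simulate ordinal responses with `_synthetic_data` and fit them with `_OAA`, which shares the `_compute_archetypes` signature used for `_CAA` above. Parameter values are illustrative; `sigma` is passed through a softplus, so -3.0 corresponds to a small noise level.

from AAM.synthetic_data_class import _synthetic_data
from AAM.OAA_class import _OAA

data = _synthetic_data(N=500, M=10, K=3, p=5, sigma=-3.0, rb=False, a_param=1.0, b_param=100)
print(data.X.shape)          # (M, N) matrix of ordinal answers in 1..p

result = _OAA()._compute_archetypes(
    data.X, K=3, p=5, n_iter=3000, lr=0.01, mute=False,
    columns=data.columns, with_synthetic_data=True, early_stopping=True)
print(result.loss[-1])       # final negative log-likelihood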
from AAM.AA_class import AA
AA-module
/AA_module-1.2.0-py3-none-any.whl/AAM/__init__.py
__init__.py
""" Created on Thu Oct 24 11:44:50 2019 @author: Julia """ import pandas as pd import numpy as np from collections import defaultdict, Counter import logging from pyteomics import mass try: from pyteomics import cmass except ImportError: cmass = mass import string from . import utils, io logger = logging.getLogger(__name__) def get_theor_spectrum(peptide, acc_frag, ion_types=('b', 'y'), maxcharge=1, aa_mass=mass.std_aa_mass, modifications=None, **kwargs): """ Calculates theoretical spectra in two ways: usual one and in integer format (mz / frag_acc). Parameters ---------- peptide : list Peptide sequence. acc_frag : float Fragment mass accuracy in Da. ion_types : tuple Fragment ion types. ('b', 'y') maxcharge: int Maximum charge of fragment ion. aa_mass: dict Amino acid masses modifications : dict or None Dict of modifications applied to peptide (int position -> float mass shift) Returns ------- Returns spectrum in two ways (usual, integer). Usual is a dict with key [ion type, charge] and m/z as a value. Integer is a dict, where key is ion type and value is a set of integers (m/z / fragment accuracy). """ if not isinstance(peptide, list): raise Exception('peptide is not a list: {!r}'.format(peptide)) peaks = defaultdict(list) theor_set = defaultdict(list) aa_mass = aa_mass.copy() H = mass.nist_mass['H'][0][0] nterm_mod = aa_mass.pop('H-', H) OH = H + mass.nist_mass['O'][0][0] cterm_mod = aa_mass.pop('-OH', OH) if modifications is None: modifications = {} for ind, pep in enumerate(peptide[:-1]): for ion_type in ion_types: nterminal = ion_type[0] in 'abc' for charge in range(1, maxcharge + 1): if ind == 0: if nterminal: mz = cmass.fast_mass2( pep, ion_type=ion_type, charge=charge, aa_mass=aa_mass, **kwargs) + (nterm_mod - H + modifications.get(1, 0.)) / charge else: mz = cmass.fast_mass2(''.join(peptide[1:]), ion_type=ion_type, charge=charge, aa_mass=aa_mass, **kwargs) + (cterm_mod - OH) / charge + sum( val for key, val in modifications.items() if key > 1) / charge else: if nterminal: mz = peaks[ion_type, charge][-1] + (modifications.get(ind + 1, 0.) + aa_mass[pep]) / charge else: mz = peaks[ion_type, charge][-1] - (modifications.get(ind + 1, 0.) + aa_mass[pep]) / charge peaks[ion_type, charge].append(mz) theor_set[ion_type].append(int(mz / acc_frag)) theor_set = {k: set(v) for k, v in theor_set.items()} # if modifications: # utils.internal('aa_mass: %s', aa_mass) # utils.internal('Theoretical spectrum with modifications: %s, %s, %s', peptide, modifications, peaks) return peaks, theor_set def RNHS_fast(spectrum_idict, theoretical_set, min_matched, ion_types=('b', 'y')): """ Matches experimental and theoretical spectra in int formats. Parameters ---------- spectrum_idict : list Experimental spectrum in integer format. Output of preprocess_spectrum. theoretical_set: dict A dict where key is ion type and value is a set of integers (m/z / fragment accuracy). Output of get_theor_spec function. min_matched : int Minumum peaks to be matched. ion_types : tuple Fragment ion types. ('b', 'y') Returns ------- Number of matched peaks, score. 
""" matched = [] isum = 0 for ion_type in ion_types: match = 0 for ion in theoretical_set[ion_type]: if ion in spectrum_idict: match += 1 isum += spectrum_idict[ion] matched.append(match) matched_approx = sum(matched) if matched_approx >= min_matched: return matched_approx, np.prod([np.math.factorial(m) for m in matched]) * isum else: return 0, 0 _preprocessing_cache = {} def preprocess_spectrum(reader, spec_id, kwargs, acc=0.01): """ Prepares experimental spectrum for matching, converts experimental spectrum to int format. Default settings for preprocessing : maximum peaks is 100, dynamic range is 1000. Parameters ---------- reader : file reader Spectrum file reader spec_id : str Spectrum id. Returns ------- List of experimental mass spectrum in integer format. """ spectrum = _preprocessing_cache.setdefault((reader, spec_id), {}) if spectrum: # logger.debug('Returning cached spectrum %s', spec_id) return spectrum # logger.debug('Preprocessing new spectrum %s', spec_id) original = reader[spec_id] maxpeaks = kwargs.get('maxpeaks', 100) dynrange = kwargs.get('dynrange', 1000) mz_array = original['m/z array'] int_array = original['intensity array'] int_array = int_array.astype(np.float32) if dynrange: i = int_array > int_array.max() / dynrange int_array = int_array[i] mz_array = mz_array[i] if maxpeaks and int_array.size > maxpeaks: i = np.argsort(int_array)[-maxpeaks:] j = np.argsort(mz_array[i]) int_array = int_array[i][j] mz_array = mz_array[i][j] tmp = (mz_array / acc).astype(int) for idx, mt in enumerate(tmp): i = int_array[idx] spectrum[mt] = max(spectrum.get(mt, 0), i) spectrum[mt - 1] = max(spectrum.get(mt - 1, 0), i) spectrum[mt + 1] = max(spectrum.get(mt + 1, 0), i) return spectrum def peptide_isoforms(peptide, m, sites, prev_aa, next_aa): """ Parameters ---------- peptide : list Peptide sequence m: modification label to apply sites : set Amino acids eligible for modification Returns ------- set of lists """ isoforms = [] if ('N-term' in sites or 'Protein N-term' in sites and prev_aa == '-') and len(peptide[0]) == 1 and peptide[0] not in sites: isoforms.append((m + peptide[0],) + tuple(peptide[1:])) if ('C-term' in sites or 'Protein C-term' in sites and next_aa == '-') and len(peptide[-1]) == 1 and peptide[-1] not in sites: isoforms.append(tuple(peptide[:-1]) + (m + peptide[-1],)) for ind, a in enumerate(peptide): if a in sites: isoforms.append(tuple(peptide[:ind]) + (m + a,) + tuple(peptide[ind + 1:])) return isoforms def get_candidates_from_aastat(mass_shifts_table, labels, threshold=1.5): """ Get localization candidates from amono acid statistics. Parameters ---------- mass_shifts_table : DataFrame DF with amino acid statistics for all mass shifts. labels : list List of amino acids that should be considered. threshold : float Threshold to be considered as significantly changed. Results ------- Series with mass shift as index and list of candidates as value. """ df = mass_shifts_table.loc[:, labels] ms, aa = np.where(df > threshold) out = {ms: [] for ms in mass_shifts_table.index} for i, j in zip(ms, aa): out[df.index[i]].append(df.columns[j]) return pd.Series(out) def get_full_set_of_candidates(locmod_df): """ Build list of dicts from all_candidates column taking into account the sums of modification. Parameters ---------- locmod_df : DataFrame DF with candidates for mass shifts. 
Returns ------- Series """ out = defaultdict(list) for ind in locmod_df.index: out[ind].append({ind: locmod_df.at[ind, 'all candidates']}) if isinstance(locmod_df.at[ind, 'sum of mass shifts'], list): for pair in locmod_df.at[ind, 'sum of mass shifts']: tmp_dict = {} tmp_dict[pair[0]] = locmod_df.at[pair[0], 'all candidates'] if len(pair) > 1: tmp_dict[pair[1]] = locmod_df.at[pair[1], 'all candidates'] out[ind].append(tmp_dict) return pd.Series(out) def localization_of_modification(ms, ms_label, row, loc_candidates, params_dict, spectra_dict, mass_shift_dict): """ Localizes modification for mass shift in a peptide. If two peptides isoforms have the same score, modification counts as 'non-localized'. Parameters ---------- ms: float mass shift ms_label : str Label for considered mass shift. row : DataFrame row Data Frame row for filtered PSMs data. loc_candidates : list List of dicts with candidates for localization. locmod_df['loc candidates'] params_dict : dict Dict with all parameters. spectra_dict : dict Keys are filenames and values are Pyteomics readers. sum_mod : bool True if sum of codifications should be considered. Returns ------- Counter of localizations, top isoform, score difference """ mass_dict_0 = mass.std_aa_mass.copy() mass_dict_0.update(params_dict['fix_mod']) peptide = params_dict['peptides_column'] prev_aa = params_dict['prev_aa_column'] next_aa = params_dict['next_aa_column'] charge = row[params_dict['charge_column']] modif_labels = string.ascii_lowercase mod_dict = utils.get_var_mods(row, params_dict) loc_stat_dict = Counter() if params_dict['mzml_files']: scan = row[params_dict['spectrum_column']].split('.')[1].lstrip('0') spectrum_id = 'controllerType=0 controllerNumber=1 scan=' + scan else: spectrum_id = row[params_dict['spectrum_column']] exp_dict = preprocess_spectrum(spectra_dict[row['file']], spectrum_id, {}, acc=params_dict['frag_acc'],) top_score, second_score = 0, 0 top_isoform = None top_terms = None for terms in loc_candidates: scores = [] mass_dict = mass_dict_0.copy() isoform_part = [] new_isoform_part = [] i = 0 isoforms = [] sequences = [] for _ms in terms: mod_aa = {modif_labels[i] + aa: mass_shift_dict[_ms] + mass_dict[aa] for aa in params_dict['labels']} mass_dict.update(mod_aa) mass_dict[modif_labels[i]] = mass_shift_dict[_ms] if not isoform_part: # first modification within this shift (or whole shift) isoform_part += peptide_isoforms(list(row[peptide]), modif_labels[i], terms[_ms], row[prev_aa], row[next_aa]) if _ms == ms_label: # this is the whole-shift modification isoforms += isoform_part elif len(terms) == 1: # two equal mass shifts form this mass shift. 
Apply the second half for p in isoform_part: new_isoform_part += peptide_isoforms(p, modif_labels[i], terms[_ms], row[prev_aa], row[next_aa]) else: # second mass shift for p in isoform_part: new_isoform_part += peptide_isoforms(p, modif_labels[i], terms[_ms], row[prev_aa], row[next_aa]) i += 1 isoforms += new_isoform_part sequences = [list(x) for x in isoforms] # utils.internal('Generated %d isoforms for terms %s at shift %s', len(sequences), terms.keys(), ms_label) for seq in sequences: # utils.internal('seq = %s', seq) theor_spec = get_theor_spectrum(seq, params_dict['frag_acc'], maxcharge=charge, aa_mass=mass_dict, ion_types=params_dict['ion_types'], modifications=mod_dict) scores.append(RNHS_fast(exp_dict, theor_spec[1], params_dict['min_spec_matched'], ion_types=params_dict['ion_types'])[1]) scores = np.array(scores) i = np.argsort(scores)[::-1] scores = scores[i] sequences = np.array(sequences)[i] if scores.size: if scores[0] > top_score: second_score = top_score top_score = scores[0] top_isoform = sequences[0] top_terms = terms if scores.size > 1 and scores[1] > second_score: second_score = scores[1] if top_isoform is None: return loc_stat_dict, None, None, None, None if any(all(sites <= {'C-term', 'N-term'} for sites in terms.values()) for terms in loc_candidates): # utils.internal('Injecting unmodified spectra for %s', ms) unmod_spec = get_theor_spectrum(list(row[peptide]), params_dict['frag_acc'], maxcharge=charge, aa_mass=mass_dict_0, ion_types=params_dict['ion_types'], modifications=mod_dict) unmod_score = RNHS_fast(exp_dict, unmod_spec[1], params_dict['min_spec_matched'], ion_types=params_dict['ion_types'])[1] else: unmod_score = 0 if top_score == second_score or top_score <= unmod_score: utils.internal('top score = %f, second score = %f, unmod score = %f', top_score, second_score, unmod_score) loc_stat_dict['non-localized'] += 1 return loc_stat_dict, None, None, None, None mass_dict = mass_dict_0.copy() # utils.internal('Top isoform is %s for terms %s (shift %s)', top_isoform, top_terms, ms_label) i = 0 for _ms in top_terms: mod_aa = {modif_labels[i] + aa: mass_shift_dict[_ms] + mass_dict[aa] for aa in params_dict['labels']} mass_dict.update(mod_aa) mass_dict[modif_labels[i]] = mass_shift_dict[_ms] i += 1 for ind, a in enumerate(top_isoform): if len(a) > 1: if ind == 0: loc_stat_dict[utils.format_localization_key('N-term', mass_dict[a[0]])] += 1 elif ind == len(top_isoform) - 1: loc_stat_dict[utils.format_localization_key('C-term', mass_dict[a[0]])] += 1 loc_stat_dict[utils.format_localization_key(a[1], mass_dict[a[0]])] += 1 scorediff = (top_score - second_score) / top_score top_i = ''.join(top_isoform) ret = loc_stat_dict, top_i, top_terms, scorediff, utils.loc_positions(top_isoform) utils.internal('Returning: %s', ret) return ret def localization(df, ms, ms_label, locations_ms, params_dict, spectra_dict, mass_shift_dict): """ Localizes modification or sum of modifications for mass shift and repeat localization if there are redundant candidates. If two peptide isoforms have the same max score, modification counts as 'non-localized'. Parameters ---------- df : DataFrame DF with filtered peptides for considered mass shift. ms: float mass shift ms_label : str Considered mass shift label locations_ms : locmod_df['loc candidates'] params_dict : dict Dict with all paramenters. spectra_dict : dict Keys are filenames and values are Pyteomics readers. Returns ------- Counter of localizations. 
""" logger.info('Localizing %s...', ms_label) logger.debug('Localizations: %s', locations_ms) if len(locations_ms) < 2 and list(locations_ms[0].values())[0] == set(): df['localization_count'], df['top isoform'], df['top_terms'], df['localization score'], df['loc_position'] = None, None, None, None, None else: z = list(zip(*df.apply(lambda x: localization_of_modification( ms, ms_label, x, locations_ms, params_dict, spectra_dict, mass_shift_dict), axis=1))) utils.internal('z: %s', z) names = ['localization_count', 'top isoform', 'top_terms', 'localization score', 'loc_position'] dt = {'localization score': np.float32} for c, v in zip(names, z): t = dt.get(c, np.object_) # utils.internal('Saving %s as %s...', c, t) shape = (len(v), ) value = np.empty(shape, t) value[:] = v # utils.internal('Value: %s', value) df[c] = value fname = io.table_path(params_dict['output directory'], ms_label) peptide = params_dict['peptides_column'] mod_aa = string.ascii_lowercase mod_dicts = {} for pair in locations_ms: labels_mod = {} i = 0 for m in pair: labels_mod[mod_aa[i]] = m i += 1 mod_dicts[tuple(sorted(pair))] = labels_mod columns = ['top isoform', 'localization score', params_dict['spectrum_column']] df['top isoform'] = df['top isoform'].fillna(df[peptide]) df.loc[df.top_terms.notna(), 'mod_dict'] = df.loc[df.top_terms.notna(), 'top_terms'].apply(lambda t: mod_dicts[tuple(sorted(t))]) df['top isoform'] = df.apply(utils.format_isoform, axis=1, args=(params_dict,)) df[columns].to_csv(fname, index=False, sep='\t') result = df['localization_count'].sum() or Counter() logger.debug('Localization result for %s: %s', ms_label, result) return {ms_label: result}
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/localization.py
localization.py
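A toy sketch of the two matching primitives above, assuming AA_stat is installed. The "experimental" spectrum here is fabricated from the theoretical ion set purely to show the data flow; in AA_stat it would come from `preprocess_spectrum` applied to a real spectrum reader.

from AA_stat.localization import get_theor_spectrum, RNHS_fast

acc_frag = 0.01
peptide = list('PEPTIDE')                        # get_theor_spectrum expects a list of residues
peaks, theor_set = get_theor_spectrum(peptide, acc_frag, maxcharge=2)

# Fake integer-format spectrum: every theoretical b/y ion present with unit intensity.
spectrum_idict = {ion: 1.0 for ions in theor_set.values() for ion in ions}

matched, score = RNHS_fast(spectrum_idict, theor_set, min_matched=4)
print(matched, score)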
import jinja2 import logging import os import sys import re import warnings import json import pkg_resources from datetime import datetime import math import operator import pandas as pd import lxml.html from pyteomics import mass from . import utils, stats logger = logging.getLogger(__name__) def format_unimod_repr(record): return '<a href="http://www.unimod.org/modifications_view.php?editid1={0[record_id]}">{0[title]}</a>'.format(record) def matches(row, ms, sites, params_dict): ldict = row['localization_count'] if 'non-localized' in ldict: return False for loc in ldict: site, shift = utils.parse_l10n_site(loc) if shift != ms: continue for possible_site, possible_position in sites: if site == possible_site: if possible_position[:3] == 'Any': # Anywhere, Any C-term, Any N-term return True if possible_position == 'Protein N-term' and row[params_dict['prev_aa_column']] == '-': return True if possible_position == 'Protein C-term' and row[params_dict['next_aa_column']] == '-': return True return False def format_unimod_info(row, df, params_dict): out = [] for record_id in row['unimod accessions']: record = utils.UNIMOD[record_id] name = format_unimod_repr(record) if 'top isoform' in df: sites = {(group['site'], group['position']) for group in record['specificity']} matching = df.apply(matches, args=(row.name, sites, params_dict), axis=1).sum() total = row['# peptides in bin'] out.append({'label': '{} ({:.0%} match)'.format(name, matching / total), 'priority': 1 - matching / total, 'type': 'unimod', 'ref': []}) else: out.append({'label': name, 'priority': 1, 'type': 'unimod', 'ref': []}) return out def get_label(table, ms, second=False): row = table.loc[ms] if len(row['raw info']) == 1: if len(row['unimod accessions']) == 1: record = utils.UNIMOD[next(iter(row['unimod accessions']))] return ('+ ' if second else '') + format_unimod_repr(record) return ms def get_artefact_interpretations(row, mass_shift_data_dict, locmod_df, params_dict): out = [] aa_mass = mass.std_aa_mass.copy() aa_mass.update(params_dict['fix_mod']) enz = params_dict.get('enzyme') df = mass_shift_data_dict[row.name][1] peps = df[params_dict['peptides_column']] match_aa = set() for aa, m in aa_mass.items(): if abs(abs(row['mass shift']) - m) < params_dict['frag_acc']: match_aa.add(aa) if not match_aa: return [] if enz: cut = set(enz['cut']) & match_aa nocut = set(enz.get('nocut', [])) else: cut, nocut = None, set() explained = False if row['mass shift'] < 0: # this can be a loss of any terminal amino acid, or ... # an artefact of open search, where the peptide is actually unmodified. 
# in the latter case the amino acid should be an enzyme cleavage site if cut: # possible artefact if enz['sense'] == 'C': pct = ( (peps.str[0].isin(cut) & ~peps.str[1].isin(nocut)) | # extra amino acid at N-term peps.str[-2].isin(cut) # extra amino acid at C-term ).sum() / df.shape[0] elif enz['sense'] == 'N': pct = ( peps.str[1].isin(cut) | (peps.str[-1].isin(cut) & ~peps.str[-2].isin(nocut)) ).sum() / df.shape[0] else: logger.critical('Unknown value of sense in specificity: %s', enz) sys.exit(1) logger.debug('%.1f%% of peptides in %s %s with %s.', pct * 100, row.name, ('start', 'end')[enz['sense'] == 'N'], utils.format_list(cut)) if pct > params_dict['artefact_thresh']: out.append('Search artefact: unmodified peptides with extra {} at {}-terminus ({:.0%} match)'.format( utils.format_list(cut), 'CN'[enz['sense'] == 'C'], pct)) explained = True else: logger.debug('Not enough peptide support search artefact interpretation.') if not explained: if 'top isoform' in df: lcount = locmod_df.at[row.name, 'localization'] pct = ( lcount.get(utils.format_localization_key('N-term', row.name), 0) + lcount.get(utils.format_localization_key('C-term', row.name), 0) ) / df.shape[0] logger.debug('%.1f%% of peptides in %s have terminal localization.', pct * 100, row.name) if pct > params_dict['artefact_thresh']: out.append('Loss of ' + utils.format_list(match_aa)) if not enz: out[-1] += ' or an open search artefact' else: # this may be a missed cleavage if cut: keys = [params_dict['prev_aa_column'], params_dict['next_aa_column']] pct = df[keys].apply( lambda row: bool(cut.intersection(row[keys[0]] + row[keys[1]])), axis=1).sum() / df.shape[0] logger.debug('%.1f%% of peptides in %s have %s as neighbor amino acid.', pct * 100, row.name, utils.format_list(cut)) if pct > params_dict['artefact_thresh']: out.append('Possible miscleavage (extra {} at terminus)'.format(utils.format_list(cut))) else: logger.debug('Not enough peptide support search artefact interpretation.') return out def collect_info(row, table, mass_shift_data_dict, locmod_df, params_dict): # Each interpretation is a dict with keys: label, priority, type, ref options = [{'label': x, 'priority': 0, 'type': 'artefact', 'ref': []} for x in get_artefact_interpretations( row, mass_shift_data_dict, locmod_df, params_dict)] options.extend(format_unimod_info(row, mass_shift_data_dict[row.name][1], params_dict)) if row['isotope index']: options.append({'label': 'isotope of {}', 'ref': [row['isotope index']], 'priority': abs(math.log10(table.at[row['isotope index'], '# peptides in bin'] / row['# peptides in bin'] / 8)), 'type': 'isotope'}) if isinstance(row['sum of mass shifts'], list): for terms in row['sum of mass shifts']: options.append({'label': '{} {}', 'ref': list(terms), 'type': 'sum', 'priority': 1 - min(table.at[terms[0], '# peptides in bin'], table.at[terms[1], '# peptides in bin']) / table['# peptides in bin'].max()}) logger.debug('Raw options for row %s: %s', row.name, options) return options def html_info_item(info): return '<span class="info_item {0[type]}" data-ref="{0[ref]}">{0[label]}</span>'.format(info) def format_info(row, table, char_limit): s = row['raw info'] for d in s: if d['type'] == 'isotope': d['label'] = d['label'].format(get_label(table, d['ref'][0])) if d['type'] == 'sum': d['label'] = d['label'].format(get_label(table, d['ref'][0]), get_label(table, d['ref'][1], second=True)) out = [] total_len = 0 for info in sorted(s, key=operator.itemgetter('priority')): out.append(html_info_item(info)) cur_len = 
len(lxml.html.document_fromstring(info['label']).text_content()) total_len += cur_len utils.internal('Label %s assigned length %d (total %d)', info['label'], cur_len, total_len) if total_len > char_limit: break else: return ', '.join(out) return ', '.join(out[:1]) + '... <span class="expand_info">(<a class="expand_info_link">expand</a>)</span>' def format_isoform(isoform): out = re.sub(r'([A-Z]\[[+-]?[0-9]+\])', r'<span class="loc">\1</span>', isoform) out = re.sub(r'([A-Z]?)\{([+-]?[0-9]+)\}', r'<span class="vmod_loc">\1[\2]</span>', out) out = re.sub(r'^([A-Z])\.', r'<span class="nterm"><span class="prev_aa">\1</span>.</span>', out) out = re.sub(r'\.([A-Z])$', r'<span class="cterm">.<span class="next_aa">\1</span></span>', out) return out def render_html_report(table_, mass_shift_data_dict, locmod_df, params_dict, recommended_fmods, recommended_vmods, vmod_combinations, opposite, save_directory, ms_labels, step=None): peptide = params_dict['peptides_column'] path = os.path.join(save_directory, 'report.html') if os.path.islink(path): logger.debug('Deleting link: %s.', path) os.remove(path) if table_ is None: with open(path, 'w') as f: f.write('No mass shifts found.') return table = table_.copy() labels = params_dict['labels'] table['raw info'] = table.apply(collect_info, axis=1, args=(table, mass_shift_data_dict, locmod_df, params_dict)) table['Possible interpretations'] = table.apply(format_info, args=(table, params_dict['html_truncate']), axis=1) full_info = json.dumps([', '.join(html_info_item(x) for x in sorted(y, key=operator.itemgetter('priority'))) for y in table['raw info']]) artefact_i = json.dumps([i for i, (aa, ms) in enumerate(recommended_vmods) if aa != 'isotope error' and any(x['type'] == 'artefact' for x in table.at[ms, 'raw info']) ]) with pd.option_context('display.max_colwidth', 250): columns = list(table.columns) mslabel = '<a id="binh" href="#">mass shift</a>' columns[0] = mslabel table.columns = columns to_hide = list({'is reference', 'sum of mass shifts', 'isotope index', 'unimod accessions', 'is isotope', 'unimod candidates', 'raw info'}.intersection(columns)) table_html = table.style.hide(axis='index').hide(to_hide, axis='columns').applymap( lambda val: 'background-color: yellow' if val > 1.5 else '', subset=labels ).apply( lambda row: ['background-color: #cccccc' if row['is reference'] else '' for cell in row], axis=1).set_table_styles([ {'selector': 'tr:hover', 'props': [('background-color', 'lightyellow')]}, {'selector': 'td, th', 'props': [('text-align', 'center')]}, {'selector': 'td, th', 'props': [('border', '1px solid black')]}] ).format({ mslabel: '<a href="#">{}</a>'.format(utils.MASS_FORMAT).format, '# peptides in bin': '<a href="#">{}</a>'.format}, precision=3 ).bar(subset='# peptides in bin', color=stats.cc[2]).to_html( uuid="aa_stat_table") peptide_tables = [] for ms in table.index: df = mass_shift_data_dict[ms][1] if 'localization score' in df and df['localization score'].notna().any(): df = df.sort_values(['localization score'], ascending=False).loc[:, ['top isoform', 'localization score', params_dict['spectrum_column']]] df['localization score'] = df['localization score'].astype(float) else: dfc = df[[peptide, params_dict['spectrum_column'], params_dict['mods_column']]].copy() dfc[peptide] = dfc.apply(utils.get_column_with_mods, axis=1, args=(params_dict,)) dfc[peptide] = ( df[params_dict['prev_aa_column']].str[0] + '.' + dfc[peptide] + '.' 
+ df[params_dict['next_aa_column']].str[0]) df = dfc[[peptide, params_dict['spectrum_column']]] peptide_tables.append(df.to_html( table_id='peptides_' + ms, classes=('peptide_table',), index=False, escape=False, na_rep='', formatters={ 'top isoform': format_isoform, peptide: format_isoform, 'localization score': '{:.2f}'.format})) varmod_table_styles = [{'selector': 'th.col_heading', 'props': [('display', 'none')]}, {'selector': 'th.blank', 'props': [('display', 'none')]}, {'selector': '.data.row0', 'props': [('font-weight', 'bold')]}] if params_dict['fix_mod']: d = params_dict['fix_mod'].copy() d = utils.masses_to_mods(d) fixmod = pd.DataFrame.from_dict(d, orient='index', columns=['value']).T.style.set_caption( 'Configured, fixed').format(utils.MASS_FORMAT).to_html(uuid="set_fix_mod_table") else: fixmod = "Set modifications: none." if params_dict['var_mod']: varmod = pd.DataFrame.from_records(params_dict['var_mod'], columns=['', 'value']).T.style.set_caption( 'Configured, variable').format( lambda x: utils.mass_format(x) if isinstance(x, float) else x).set_table_styles( varmod_table_styles).to_html(uuid="set_var_mod_table") else: varmod = None if recommended_fmods: recmod = pd.DataFrame.from_dict(recommended_fmods, orient='index', columns=['value']).T.style.set_caption( 'Recommended, fixed').to_html(uuid="rec_fix_mod_table") else: recmod = "No fixed modifications recommended." if recommended_vmods: vmod_comb_i = json.dumps(list(vmod_combinations)) vmod_comb_val = json.dumps(['This modification is a combination of {} and {}.'.format(*v) for v in vmod_combinations.values()]) opp_mod_i = json.dumps(opposite) opp_mod_v = json.dumps(['This modification negates a fixed modification.\n' 'For closed search, it is equivalent to set {} @ {} as variable.'.format( utils.mass_format(-ms_labels[recommended_vmods[i][1]]), recommended_vmods[i][0]) for i in opposite]) rec_var_mods = pd.DataFrame.from_records(recommended_vmods, columns=['', 'value']).T.style.set_caption( 'Recommended, variable').format({'isotope error': '{:.0f}'}).set_table_styles(varmod_table_styles).to_html(uuid="rec_var_mod_table") else: rec_var_mods = "No variable modifications recommended." 
vmod_comb_i = vmod_comb_val = opp_mod_i = opp_mod_v = '[]' reference = table.loc[table['is reference']].index[0] if step is None: steps = '' else: if step != 1: prev_url = os.path.join(os.path.pardir, 'os_step_{}'.format(step - 1), 'report.html') prev_a = r'<a class="prev steplink" href="{}">Previous step</a>'.format(prev_url) else: prev_a = '' if recommended_fmods: next_url = os.path.join(os.path.pardir, 'os_step_{}'.format(step + 1), 'report.html') next_a = r'<a class="next steplink" href="{}">Next step</a>'.format(next_url) else: next_a = '' steps = prev_a + '\n' + next_a version = pkg_resources.get_distribution('AA_stat').version write_html(path, table_html=table_html, peptide_tables=peptide_tables, fixmod=fixmod, varmod=varmod, reference=reference, recmod=recmod, rec_var_mod=rec_var_mods, steps=steps, version=version, date=datetime.now(), vmod_comb_i=vmod_comb_i, vmod_comb_val=vmod_comb_val, opposite_i=opp_mod_i, opposite_v=opp_mod_v, full_info=full_info, artefact_i=artefact_i) def write_html(path, **template_vars): with warnings.catch_warnings(): if not sys.warnoptions: warnings.simplefilter('ignore') templateloader = jinja2.PackageLoader('AA_stat', '') templateenv = jinja2.Environment(loader=templateloader, autoescape=False) template_file = 'report.template' template = templateenv.get_template(template_file) with open(path, 'w') as output: output.write(template.render(template_vars))
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/html.py
html.py
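A quick illustration of the `format_isoform` helper above (hypothetical peptide string): localized shifts such as `M[16]` are wrapped in a `loc` span, variable modifications written in braces get a `vmod_loc` span, and the flanking residues around the dots become `prev_aa`/`next_aa` spans.

from AA_stat.html import format_isoform

print(format_isoform('K.PEPTIDEM[16]K.R'))
# -> M[16] is wrapped in <span class="loc">...</span>; "K." and ".R" get the nterm/cterm spans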
import logging from collections import defaultdict import re from . import utils logger = logging.getLogger(__name__) def get_fix_mod_from_l10n(mslabel, locmod_df): l10n = locmod_df.at[mslabel, 'localization'] logger.debug('Localizations for %s: %s', mslabel, l10n) if l10n: l10n.pop('non-localized', None) top_loc = max(l10n, key=l10n.get) logger.debug('Top localization label for %s: %s', mslabel, top_loc) return top_loc def get_fixed_mod_raw(aa, data_dict, params_dict, choices=None): dist_aa = [] for ms, v in data_dict.items(): if choices is None or ms in choices: dist_aa.append([v[0], v[1][params_dict['peptides_column']].apply(lambda x: x.count(aa)).sum()]) utils.internal('Counts for %s: %s', aa, dist_aa) top_shift = max(dist_aa, key=lambda tup: tup[1]) return utils.mass_format(top_shift[0]) def determine_fixed_mods_nonzero(reference, locmod_df, data_dict): """Determine fixed modifications in case the reference shift is not at zero. Needs localization. """ utils.internal('Localizations for %s: %s', reference, locmod_df.at[reference, 'localization']) loc = get_fix_mod_from_l10n(reference, locmod_df) label = reference data_dict = data_dict.copy() while loc is None: del data_dict[label] label = max(data_dict, key=lambda k: data_dict[k][1].shape[0]) loc = get_fix_mod_from_l10n(label, locmod_df) logger.debug('No luck. Trying %s. Got %s', label, loc) if not data_dict: break return loc def determine_fixed_mods_zero(aastat_result, data_dict, params_dict): """Determine fixed modifications in case the reference shift is at zero. Does not need localization. """ fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh'] min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor'] fix_mod_dict = {} reference = utils.mass_format(0) aa_rel = aastat_result[reference][2] utils.internal('aa_rel:\n%s', aa_rel) candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index logger.debug('Fixed mod candidates: %s', candidates) for i in candidates: candidate_label = get_fixed_mod_raw(i, data_dict, params_dict) if candidate_label != reference: # number of peptides with `i` at shift `candidate label` must be higher than ... count_cand = data_dict[candidate_label][1][params_dict['peptides_column']].str.contains(i).sum() # number of peptides with `i` at shift `reference` by a factor of `min_fix_mod_pep_count_factor` count_ref = data_dict[reference][1][params_dict['peptides_column']].str.contains(i).sum() # peptide count at candidate shift over # of peptides at reference est_ratio = count_cand / len(data_dict[reference][1]) logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated pct: %f', i, count_ref, reference, count_cand, candidate_label, est_ratio) if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and ( est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor): fix_mod_dict[i] = candidate_label else: logger.debug('Could not find %s anywhere. Can\'t fix.', i) else: logger.debug('Reference shift is the best for %s.', i) return fix_mod_dict def determine_fixed_mods(aastat_result, aastat_df, locmod_df, data_dict, params_dict): reference = aastat_df.loc[aastat_df['is reference']].index[0] if reference == utils.mass_format(0): logger.info('Reference bin is at zero shift.') fix_mod_dict = determine_fixed_mods_zero(aastat_result, data_dict, params_dict) else: if locmod_df is None: logger.warning('No localization data. ' 'Cannot determine fixed modifications when reference mass shift is non-zero.') return {} logger.info('Reference bin is at %s. 
Looking for fixed modification to compensate.', reference) loc = determine_fixed_mods_nonzero(reference, locmod_df, data_dict) if loc: aa, shift = utils.parse_l10n_site(loc) fix_mod_dict = {aa: shift} else: logger.info('No localizations. Stopping.') return fix_mod_dict def recommend_isotope_error(aastat_df, locmod_df, params_dict): reference = aastat_df.loc[aastat_df['is reference']].index[0] ref_peptides = locmod_df.at[reference, '# peptides in bin'] logger.debug('%d peptides at reference %s', ref_peptides, reference) ref_isotopes = [] label = reference while label: label = utils.get_isotope_shift(label, locmod_df) ref_isotopes.append(label) ref_isotopes.pop() i = 0 for i, label in enumerate(ref_isotopes, 1): peps = locmod_df.at[label, '# peptides in bin'] logger.debug('%d peptides at %s.', peps, label) if peps * 100 / ref_peptides < params_dict['recommend isotope threshold']: return i - 1 return i def recalculate_counts(aa, ms, mods_and_counts, data_dict): mods_and_counts[aa].pop(ms, None) for i, row in data_dict[ms][1].iterrows(): seq = row['top isoform'].split('.')[1] if row['top_terms'] is not None and ms in row['top_terms']: if aa == 'N-term' and seq[1] == '[': utils.internal('Reducing count of %s for %s (%s)', seq[0], seq, aa) if mods_and_counts[seq[0]].get(ms, 0) > 0: mods_and_counts[seq[0]][ms] -= 1 elif aa == 'C-term' and seq[-1] == ']': res = seq.split('[')[0][-1] utils.internal('Reducing count of %s for %s (%s)', res, seq, aa) if mods_and_counts[res].get(ms, 0) > 0: mods_and_counts[res][ms] -= 1 elif seq[:2] == aa + '[': utils.internal('Reducing count of N-term for %s', seq) if mods_and_counts['N-term'].get(ms, 0) > 0: mods_and_counts['N-term'][ms] -= 1 elif seq[-1] == ']' and seq.split('[')[0][-1] == aa: utils.internal('Reducing count of C-term for %s', seq) if mods_and_counts['C-term'].get(ms, 0) > 0: mods_and_counts['C-term'][ms] -= 1 def recalculate_with_isotopes(aa, ms, isotope_rec, mods_and_counts, data_dict, locmod_df): logger.debug('Recalculating counts for %s @ %s', aa, ms) recalculate_counts(aa, ms, mods_and_counts, data_dict) i = 0 while i < isotope_rec: label = utils.get_isotope_shift(ms, locmod_df) if label: logger.debug('Recalculating %s counts for isotope shift %s', aa, label) recalculate_counts(aa, label, mods_and_counts, data_dict) i += 1 else: break def same_residue(isoform): return ']{' in isoform or re.search(r'\.{[0-9+-]*?}[A-Z]\[', isoform) def recalculate_varmods(data_dict, mods_and_counts, params_dict): # cancel out already configured modifications for site, mod in params_dict['var_mod']: ms = utils.find_mass_shift(mod, data_dict, params_dict['prec_acc']) if ms: if mods_and_counts[site].get(ms, 0) > 0: logger.debug('Setting all counts for %s @ %s to zero.', ms, site) mods_and_counts[site][ms] = 0 for ms in data_dict: shift, df = data_dict[ms] for i, row in df.iterrows(): if row['top_terms'] is not None and ms in row['top_terms']: peptide = row[params_dict['peptides_column']] if same_residue(row['top isoform']): # localization and enabled variable modification on the same residue # this should count towards sum of these shifts, not the localized one pos = row['loc_position'][0] mods = utils.get_var_mods(row, params_dict) utils.internal('%s: extracting %d from %s', row['top isoform'], pos, mods) if pos in mods: vm = mods[pos] elif pos == 1: vm = mods[0] elif pos == len(peptide): vm = mods[pos + 1] else: raise KeyError() aa = peptide[pos - 1] if mods_and_counts[aa].get(ms, 0) > 0: utils.internal('Reducing count of %s at %s', aa, ms) mods_and_counts[aa][ms] 
-= 1 if pos == 1 and mods_and_counts['N-term'].get(ms, 0) > 0: mods_and_counts['N-term'][ms] -= 1 utils.internal('Reducing count of N-term at %s', ms) if pos == len(peptide) and mods_and_counts['C-term'].get(ms, 0) > 0: utils.internal('Reducing count of C-term at %s', ms) mods_and_counts['C-term'][ms] -= 1 sum_ms = utils.find_mass_shift(vm + shift, data_dict, params_dict['prec_acc']) if sum_ms: mods_and_counts[aa][sum_ms] = mods_and_counts[aa].get(sum_ms, 0) + 1 utils.internal('Increasing count of %s at %s', aa, sum_ms) if pos == 1: utils.internal('Increasing count of N-term at %s', sum_ms) mods_and_counts['N-term'][sum_ms] = mods_and_counts['N-term'].get(sum_ms, 0) + 1 if pos == len(peptide): utils.internal('Increasing count of C-term at %s', sum_ms) mods_and_counts['C-term'][sum_ms] = mods_and_counts['C-term'].get(sum_ms, 0) + 1 def determine_var_mods(aastat_result, aastat_df, locmod_df, data_dict, params_dict, recommended_fix_mods=None): if locmod_df is None: logger.info('Cannot recommend variable modifications without localization.') return {} var_mods = [] recommended = set() multiple = params_dict['multiple_mods'] if multiple: logger.info('Recommending multiple modifications on same residue.') else: logger.info('Recommending one modification per residue.') isotope_rec = recommend_isotope_error(aastat_df, locmod_df, params_dict) logger.info('Recommended isotope mass error: %d.', isotope_rec) if isotope_rec: var_mods.append(('isotope error', isotope_rec)) reference = aastat_df.loc[aastat_df['is reference']].index[0] mods_and_counts = defaultdict(dict) # dict of AA: shift label: count for shift in data_dict: if shift == reference: continue l10n = locmod_df.at[shift, 'localization'] for k, count in l10n.items(): if k == 'non-localized': continue aa, locshift = utils.parse_l10n_site(k) if locshift == shift: mods_and_counts[aa][shift] = count logger.debug('Without isotopes, localization counts are:') for k, d in mods_and_counts.items(): logger.debug('%s: %s', k, d) if isotope_rec: for aa, dcounts in mods_and_counts.items(): for shift, count in list(dcounts.items()): i = 0 while i < isotope_rec: label = utils.get_isotope_shift(shift, locmod_df) if label: dcounts[shift] = dcounts.get(shift, 0) + mods_and_counts[aa].get(label, 0) # dcounts.pop(label, None) i += 1 else: break i = 0 shift = reference while i < isotope_rec: label = utils.get_isotope_shift(shift, locmod_df) if label: logger.debug('Removing all counts for isotope shift %s', label) for aa, dcounts in mods_and_counts.items(): dcounts[label] = 0 i += 1 else: break logger.debug('With isotopes, localization counts are:') for k, d in mods_and_counts.items(): logger.debug('%s: %s', k, d) if recommended_fix_mods: logger.debug('Subtracting counts for fixed mods.') for aa, shift in recommended_fix_mods.items(): recalculate_with_isotopes(aa, shift, isotope_rec, mods_and_counts, data_dict, locmod_df) if params_dict['var_mod']: if not multiple: logger.info('Multiple variable modifications are disabled, not recommending {} for variable modifications.'.format( utils.format_list(set(x[0] for x in params_dict['var_mod'])))) for aa, shift in params_dict['var_mod']: logger.debug('Removing all counts for %s.', aa) for sh in mods_and_counts[aa]: mods_and_counts[aa][sh] = 0 logger.debug('Subtracting counts for variable mods.') recalculate_varmods(data_dict, mods_and_counts, params_dict) for i in range(params_dict['variable_mods']): logger.debug('Choosing variable modification %d. 
Counts are:', i + 1) for k, d in mods_and_counts.items(): logger.debug('%s: %s', k, d) aa_shifts = {aa: max(dcounts, key=dcounts.get) for aa, dcounts in mods_and_counts.items() if dcounts} if mods_and_counts: aa_counts = {aa: mods_and_counts[aa][shift] for aa, shift in aa_shifts.items()} logger.debug('Best localization counts: %s', aa_shifts) logger.debug('Values: %s', aa_counts) if aa_shifts: top_aa = max(aa_shifts, key=aa_counts.get) top_shift = aa_shifts[top_aa] top_count = aa_counts[top_aa] if top_count < params_dict['min_loc_count']: logger.debug('Localization count too small (%d), stopping.', top_count) break recommended.add(top_aa) var_mods.append((top_aa, top_shift)) logger.debug('Chose %s @ %s.', top_shift, top_aa) recalculate_with_isotopes(top_aa, top_shift, isotope_rec, mods_and_counts, data_dict, locmod_df) if not multiple: logger.debug('Removing all counts for %s.', top_aa) for sh in mods_and_counts[top_aa]: mods_and_counts[top_aa][sh] = 0 return var_mods
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/recommendations.py
recommendations.py
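A minimal sketch (not part of AA_stat) of the greedy selection step that determine_var_mods in recommendations.py performs: given a nested {residue: {shift label: count}} mapping of localization counts, pick the residue/shift pair with the strongest support, subject to a minimum count. The function name, the example counts and the min_loc_count value are illustrative assumptions, not the package's API.

from collections import defaultdict

def pick_top_modification(mods_and_counts, min_loc_count=10):
    # best-supported shift label for each residue
    aa_shifts = {aa: max(d, key=d.get) for aa, d in mods_and_counts.items() if d}
    if not aa_shifts:
        return None
    aa_counts = {aa: mods_and_counts[aa][shift] for aa, shift in aa_shifts.items()}
    top_aa = max(aa_counts, key=aa_counts.get)
    if aa_counts[top_aa] < min_loc_count:
        return None  # localization support too weak to recommend anything
    return top_aa, aa_shifts[top_aa], aa_counts[top_aa]

counts = defaultdict(dict, {'M': {'+15.9949': 120}, 'C': {'+57.0215': 40}, 'K': {}})
print(pick_top_modification(counts))  # ('M', '+15.9949', 120)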
import matplotlib matplotlib.use('Agg') import pylab as plt import math import os import logging import warnings import multiprocessing as mp import numpy as np from sklearn import cluster from scipy.optimize import curve_fit from scipy.signal import argrelextrema, savgol_filter import seaborn as sb from . import utils logger = logging.getLogger(__name__) logging.getLogger('matplotlib.font_manager').disabled = True logging.getLogger('matplotlib.category').disabled = True cc = ["#FF6600", "#FFCC00", "#88AA00", "#006688", "#5FBCD3", "#7137C8", ] sb.set_style('white') colors = sb.color_palette(palette=cc) def _gauss_fit_slice(to_fit, unit, filename, title, params_dict, mpl_back): logger.debug('Fitting zero-shift peptides...') f = plt.figure() hist_0 = np.histogram(to_fit, bins=int(params_dict['zero_window'] / params_dict['bin_width'])) hist_y = hist_0[0] hist_x = 0.5 * (hist_0[1][:-1] + hist_0[1][1:]) plt.plot(hist_x, hist_y, 'b+') popt, perr = gauss_fitting(max(hist_y), hist_x, hist_y) plt.scatter(hist_x, gauss(hist_x, *popt), label='Gaussian fit') plt.xlabel('massdiff, ' + unit) plt.title(title) mpl_back.savefig(f) plt.close() logger.info('Systematic shift is %.4f %s for file %s [ %s ]', popt[1], unit, filename, title) return popt def clusters(df, to_fit, unit, filename, params_dict, mpl_back): if to_fit.shape[0] < 500: logger.warning('Not enough data for cluster analysis. Need at least 500 peptides near zero, found %d.', to_fit.shape[0]) return None X = np.empty((to_fit.shape[0], 2)) X[:, 0] = to_fit X[:, 1] = df.loc[to_fit.index, params_dict['rt_column']] logger.debug('Clustering a %s array.', X.shape) logger.debug('Initial dimensions: %s to %s', X.min(axis=0), X.max(axis=0)) logger.debug('Converting to square...') span_0 = X[:, 0].max() - X[:, 0].min() span_1 = X[:, 1].max() - X[:, 1].min() ratio = span_1 / span_0 X[:, 0] *= ratio logger.debug('Transformed dimensions: %s to %s', X.min(axis=0), X.max(axis=0)) eps = span_1 * params_dict['zero_window'] * params_dict['eps_adjust'] logger.debug('Using eps=%f', eps) clustering = cluster.DBSCAN(eps=eps, min_samples=params_dict['min_samples']).fit(X) f = plt.figure() sc = plt.scatter(to_fit, X[:, 1], c=clustering.labels_) plt.legend(*sc.legend_elements(), title='Clusters') plt.xlabel(unit) plt.ylabel(params_dict['rt_column']) mpl_back.savefig(f) plt.close() f = plt.figure() for c in np.unique(clustering.labels_): plt.hist(X[clustering.labels_ == c, 1], label=c, alpha=0.5) plt.xlabel(params_dict['rt_column']) plt.legend() mpl_back.savefig(f) plt.close() return clustering def cluster_time_span(clustering, label, df, to_fit, params_dict): times = df.loc[to_fit.index].loc[clustering.labels_ == label, params_dict['rt_column']] return times.min(), times.max() def span_percentage(span, df, to_fit, params_dict): start, end = span all_rt = df[params_dict['rt_column']] return (end - start) / (all_rt.max() - all_rt.min()) def cluster_time_percentage(clustering, label, df, to_fit, params_dict): span = cluster_time_span(clustering, label, df, to_fit, params_dict) return span_percentage(span, df, to_fit, params_dict) def filter_clusters(clustering, df, to_fit, params_dict): nclusters = clustering.labels_.max() + 1 logger.debug('Found %d clusters, %d labels assigned.', nclusters, clustering.labels_.size) if not nclusters: return [] out = [] clustered_peps = 0 for i in np.unique(clustering.labels_): if i == -1: continue npep = (clustering.labels_ == i).sum() if npep < params_dict['min_peptides_for_mass_calibration']: logger.debug('Cluster %s is too small for 
calibration (%d), discarding.', i, npep) continue span_pct = cluster_time_percentage(clustering, i, df, to_fit, params_dict) if span_pct < params_dict['cluster_span_min']: logger.debug('Cluster %s spans %.2f%% of the run (too small, thresh = %.2f%%). Discarding.', i, span_pct * 100, params_dict['cluster_span_min'] * 100) continue out.append(i) clustered_peps += npep logger.debug('Pre-selected clusters: %s', out) logger.debug('%.2f%% peptides in clusters, threshold is %.2f%%.', clustered_peps / df.shape[0] * 100, params_dict['clustered_pct_min'] * 100) if clustered_peps / df.shape[0] < params_dict['clustered_pct_min']: logger.debug('Too few peptides in clusters, discarding clusters altogether.') return [] return out def get_fittable_series(df, params_dict, mask=None): window = params_dict['zero_window'] shifts = params_dict['mass_shifts_column'] loc = df[shifts].abs() < window # logger.debug('loc size for zero shift: %s', loc.size) if params_dict['calibration'] == 'gauss': to_fit = df.loc[loc, shifts] unit = 'Da' elif params_dict['calibration'] == 'gauss_relative': to_fit = df.loc[loc, shifts] * 1e6 / df.loc[loc, params_dict['calculated_mass_column']] unit = 'ppm' elif params_dict['calibration'] == 'gauss_frequency': freq_measured = 1e6 / np.sqrt(utils.measured_mz_series(df, params_dict)) freq_calculated = 1e6 / np.sqrt(utils.calculated_mz_series(df, params_dict)) to_fit = (freq_measured - freq_calculated).loc[loc] unit = 'freq. units' if mask is not None: to_fit = to_fit.loc[mask] logger.debug('Returning a %s fittable series for a %s dataframe with a %s mask.', to_fit.shape, df.shape, mask.shape if mask is not None else None) return to_fit, unit def get_cluster_masks(filtered_clusters, clustering, df, to_fit, params_dict): all_rt = df[params_dict['rt_column']] time_spans = {i: cluster_time_span(clustering, i, df, to_fit, params_dict) for i in filtered_clusters} sorted_clusters = sorted(filtered_clusters, key=time_spans.get) # sorts by span start i = 0 prev = all_rt.min() masks = {} while i < len(sorted_clusters): cur_end = time_spans[sorted_clusters[i]][1] if i == len(sorted_clusters) - 1: next_point = all_rt.max() + 1 else: next_start = time_spans[sorted_clusters[i + 1]][0] next_point = (cur_end + next_start) / 2 logger.debug('Time span %.1f - %.1f assigned to cluster %s', prev, next_point, sorted_clusters[i]) masks[sorted_clusters[i]] = (all_rt >= prev) & (all_rt < next_point) i += 1 prev = next_point assigned_masks = [masks[c] for c in filtered_clusters] return assigned_masks def smooth(y, window_size=15, power=5): """ Smoothes function. Paramenters ----------- y : array-like function to smooth. window_size : int Smothing window. power : int Power of smothing function. 
Returns ------- Smoothed function """ y_smooth = savgol_filter(y, window_size, power) return y_smooth def gauss(x, a, x0, sigma): with warnings.catch_warnings(): warnings.simplefilter("ignore") return a / sigma / np.sqrt(2 * np.pi) * np.exp(-(x - x0) * (x - x0) / (2 * sigma ** 2)) def gauss_fitting(center_y, x, y): """ Fits with Gauss function `center_y` - starting point for `a` parameter of gauss `x` numpy array of mass shifts `y` numpy array of number of psms in this mass shifts """ mean = (x * y).sum() / y.sum() sigma = np.sqrt((y * (x - mean) ** 2).sum() / y.sum()) a = center_y * sigma * np.sqrt(2 * np.pi) try: popt, pcov = curve_fit(gauss, x, y, p0=(a, mean, sigma)) perr = np.sqrt(np.diag(pcov)) return popt, perr except (RuntimeError, TypeError): return None, None def fit_worker(args): return fit_batch_worker(*args) def fit_batch_worker(out_path, batch_size, xs, ys, half_window, height_error, sigma_error): shape = int(math.ceil(np.sqrt(batch_size))) figsize = (shape * 3, shape * 4) plt.figure(figsize=figsize) plt.tight_layout() logger.debug('Created a figure with size %s', figsize) poptpvar = [] for i in range(batch_size): center = i * (2 * half_window + 1) + half_window x = xs[center - half_window: center + half_window + 1] y = ys[center - half_window: center + half_window + 1] popt, perr = gauss_fitting(ys[center], x, y) plt.subplot(shape, shape, i + 1) if popt is None: label = 'NO FIT' else: if (x[0] <= popt[1] and popt[1] <= x[-1] and perr[0] / popt[0] < height_error and perr[2] / popt[2] < sigma_error): label = 'PASSED' poptpvar.append(np.concatenate([popt, perr])) plt.vlines(popt[1] - 3 * popt[2], 0, ys[center], label='3sigma interval') plt.vlines(popt[1] + 3 * popt[2], 0, ys[center]) else: label = 'FAILED' plt.plot(x, y, 'b+:', label=label) if label != 'NO FIT': plt.scatter(x, gauss(x, *popt), label='Gaussian fit\n $\\sigma$ = {:.4f}'.format(popt[2])) plt.legend() plt.title("{0:.3f}".format(xs[center])) plt.grid(True) logger.debug('Fit done. Saving %s...', out_path) plt.savefig(out_path) plt.close() return poptpvar def fit_peaks(data, args, params_dict): """ Finds Gauss-like peaks in mass shift histogram. Parameters ---------- data : DataFRame A DF with all (non-filtered) results of open search. args: argsparse params_dict : dict Parameters dict. 
""" logger.info('Performing Gaussian fit...') fit_batch = params_dict['fit batch'] half_window = int(params_dict['window'] / 2) + 1 hist = np.histogram(data.loc[data['is_decoy'] == False, params_dict['mass_shifts_column']], bins=params_dict['bins']) hist_y = smooth(hist[0], window_size=params_dict['window'], power=5) hist_x = 0.5 * (hist[1][:-1] + hist[1][1:]) loc_max_candidates_ind = argrelextrema(hist_y, np.greater_equal)[0] # smoothing and finding local maxima min_height = 2 * np.median(hist[0][hist[0] > 1]) # minimum bin height expected to be peak approximate noise level as median of all non-negative loc_max_candidates_ind = loc_max_candidates_ind[hist_y[loc_max_candidates_ind] >= min_height] if not loc_max_candidates_ind.size: logger.info('No peaks found for fit.') return hist, np.array([]) height_error = params_dict['max_deviation_height'] sigma_error = params_dict['max_deviation_sigma'] logger.debug('Candidates for fit: %s', len(loc_max_candidates_ind)) nproc = int(math.ceil(len(loc_max_candidates_ind) / fit_batch)) maxproc = params_dict['processes'] if maxproc > 0: nproc = min(nproc, maxproc) if nproc > 1: arguments = [] logger.debug('Splitting the fit into %s batches...', nproc) n = min(nproc, mp.cpu_count()) logger.debug('Creating a pool of %s processes.', n) pool = mp.Pool(n) for proc in range(nproc): xlist = [hist_x[center - half_window: center + half_window + 1] for center in loc_max_candidates_ind[proc * fit_batch: (proc + 1) * fit_batch]] xs = np.concatenate(xlist) ylist = [hist[0][center - half_window: center + half_window + 1] for center in loc_max_candidates_ind[proc * fit_batch: (proc + 1) * fit_batch]] ys = np.concatenate(ylist) out = os.path.join(args.dir, 'gauss_fit_{}.pdf'.format(proc + 1)) arguments.append((out, len(xlist), xs, ys, half_window, height_error, sigma_error)) res = pool.map_async(fit_worker, arguments) poptpvar_list = res.get() # logger.debug(poptpvar_list) pool.close() pool.join() logger.debug('Workers done.') poptpvar = [p for r in poptpvar_list for p in r] else: xs = np.concatenate([hist_x[center - half_window: center + half_window + 1] for center in loc_max_candidates_ind]) ys = np.concatenate([hist[0][center - half_window: center + half_window + 1] for center in loc_max_candidates_ind]) poptpvar = fit_batch_worker(os.path.join(args.dir, 'gauss_fit.pdf'), len(loc_max_candidates_ind), xs, ys, half_window, height_error, sigma_error) logger.debug('Returning from fit_peaks. Array size is %d.', len(poptpvar)) return np.array(poptpvar) _Mkstyle = matplotlib.markers.MarkerStyle _marker_styles = [_Mkstyle('o', fillstyle='full'), (_Mkstyle('o', fillstyle='left'), _Mkstyle('o', fillstyle='right')), (_Mkstyle('o', fillstyle='top'), _Mkstyle('o', fillstyle='bottom')), (_Mkstyle(8), _Mkstyle(9)), (_Mkstyle('v'), _Mkstyle('^')), (_Mkstyle('|'), _Mkstyle('_')), (_Mkstyle('+'), _Mkstyle('x'))] def _generate_pair_markers(): '''Produce style & color pairs for localization markers (except the main one).''' for i in [3, 4, 5, 0, 1, 2]: for ms in _marker_styles[1:]: yield colors[i], ms def _get_max(arr): values = [x for x in arr if x is not None] if values: return max(values) return 0 def plot_figure(ms_label, ms_counts, left, right, params_dict, save_directory, localizations=None, sumof=None): """ Plots amino acid spatistics. Parameters ---------- ms_label : str Mass shift in string format. ms_counts : int Number of peptides in a mass shift. 
left : list Amino acid statistics data [[values], [errors]] right : list Amino acid frequences in peptides params_dict : dict Parameters dict. save_directory: str Saving directory. localizations : Counter Localization counter using ms/ms level. sumof : List List of str tuples for constituent mass shifts. """ b = 0.1 # shift in bar plots width = 0.2 # for bar plots labels = params_dict['labels'] labeltext = ms_label + ' Da mass shift,\n' + str(ms_counts) + ' peptides' x = np.arange(len(labels)) distributions = left[0] errors = left[1] fig, ax_left = plt.subplots() fig.set_size_inches(params_dict['figsize']) ax_left.bar(x - b, distributions.loc[labels], yerr=errors.loc[labels], width=width, color=colors[2], linewidth=0) ax_left.set_ylabel('Relative AA abundance', color=colors[2]) ax_left.set_xticks(x) ax_left.set_xticklabels(labels) ax_left.hlines(1, -1, x[-1] + 1, linestyles='dashed', color=colors[2]) ax_right = ax_left.twinx() ax_right.bar(x + b, right, width=width, linewidth=0, color=colors[0]) ax_right.set_ylim(0, 125) ax_right.set_yticks(np.arange(0, 120, 20)) ax_right.set_ylabel('Peptides with AA, %', color=colors[0]) ax_left.spines['left'].set_color(colors[2]) ax_right.spines['left'].set_color(colors[2]) ax_left.spines['right'].set_color(colors[0]) ax_right.spines['right'].set_color(colors[0]) ax_left.tick_params('y', colors=colors[2]) ax_right.tick_params('y', colors=colors[0]) pright = matplotlib.lines.Line2D([], [], marker=None, label=labeltext, alpha=0) ax_left.set_xlim(-1, x[-1] + 1) ax_left.set_ylim(0, distributions.loc[labels].max() * 1.4) logger.debug('Localizations for %s figure: %s', ms_label, localizations) if localizations: ax3 = ax_left.twinx() ax3.spines['right'].set_position(('axes', 1.1)) ax3.set_frame_on(True) ax3.patch.set_visible(False) ax3.set_ylabel('Localization count', color=colors[3]) for sp in ax3.spines.values(): sp.set_visible(False) ax3.spines['right'].set_visible(True) ax3.spines['right'].set_color(colors[3]) ax3.tick_params('y', colors=colors[3]) # plot simple modifications (not sum) with the first style, # then parts of sum values = [localizations.get(key + '_' + ms_label) for key in labels] maxcount = _get_max(values) label_prefix = 'Location of ' ax3.scatter(x, values, marker=_marker_styles[0], color=colors[3], label=label_prefix + ms_label) if isinstance(sumof, list): for pair, (color, style) in zip(sumof, _generate_pair_markers()): values_1 = [localizations.get(key + '_' + pair[0]) for key in labels] maxcount = max(maxcount, _get_max(values_1)) ax3.scatter(x, values_1, marker=style[0], color=color, label=label_prefix + pair[0]) if pair[0] != pair[1]: values_2 = [localizations.get(key + '_' + pair[1]) for key in labels] if values_2: maxcount = max(maxcount, _get_max(values_2)) ax3.scatter(x, values_2, marker=style[1], color=color, label=label_prefix + pair[1]) terms = {key for key in localizations if key[1:6] == '-term'} # logger.debug('Found terminal localizations: %s', terms) for t in terms: label = '{} at {}: {}'.format(*reversed(t.split('_')), localizations[t]) p = ax3.plot([], [], label=label)[0] p.set_visible(False) pright.set_label(pright.get_label() + '\nNot localized: {}'.format(localizations.get('non-localized', 0))) if maxcount: ax3.legend(loc='upper left', ncol=2) ax3.set_ylim(0, 1.4 * max(maxcount, 1)) ax_right.legend(handles=[pright], loc='upper right', edgecolor='dimgrey', fancybox=True, handlelength=0) fig.tight_layout() fig.savefig(os.path.join(save_directory, ms_label + '.png'), dpi=500) fig.savefig(os.path.join(save_directory, 
ms_label + '.svg')) plt.close() def summarizing_hist(table, save_directory): width = 0.8 fig, ax = plt.subplots(figsize=(len(table), 5)) ax.bar(range(len(table)), table.sort_values('mass shift')['# peptides in bin'], color=colors[2], align='center', width=width) ax.set_title('Peptides in mass shifts', fontsize=12) ax.set_xlabel('Mass shift', fontsize=10) ax.set_ylabel('Number of peptides') ax.set_xlim((-1, len(table))) ax.set_xticks(range(len(table))) ax.set_xticklabels(table.sort_values('mass shift')['mass shift'].apply('{:.2f}'.format)) total = table['# peptides in bin'].sum() vdist = table['# peptides in bin'].max() * 0.01 max_height = 0 for i, patch in enumerate(ax.patches): current_height = patch.get_height() if current_height > max_height: max_height = current_height ax.text(patch.get_x() + width / 2, current_height + vdist, '{:>6.2%}'.format(table.at[table.index[i], '# peptides in bin'] / total), fontsize=10, color='dimgrey', ha='center') plt.ylim(0, max_height * 1.2) plt.tight_layout() plt.savefig(os.path.join(save_directory, 'summary.png')) # dpi=500 plt.savefig(os.path.join(save_directory, 'summary.svg')) plt.close()
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/stats.py
stats.py
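A self-contained sketch (synthetic data, not AA_stat's own call sequence) of the Gaussian peak fitting that gauss_fitting and fit_peaks in stats.py apply to the mass-shift histogram; the simulated error distribution and the bin count are arbitrary assumptions made for the example.

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, a, x0, sigma):
    return a / sigma / np.sqrt(2 * np.pi) * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

rng = np.random.default_rng(0)
shifts = rng.normal(loc=0.002, scale=0.004, size=5000)   # fake precursor mass errors, Da
hist_y, edges = np.histogram(shifts, bins=50)
hist_x = 0.5 * (edges[:-1] + edges[1:])

# moment-based starting values, as in gauss_fitting above
mean = (hist_x * hist_y).sum() / hist_y.sum()
sigma = np.sqrt((hist_y * (hist_x - mean) ** 2).sum() / hist_y.sum())
a = hist_y.max() * sigma * np.sqrt(2 * np.pi)
popt, pcov = curve_fit(gauss, hist_x, hist_y, p0=(a, mean, sigma))
print('systematic shift: %.4f Da, sigma: %.4f Da' % (popt[1], popt[2]))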
import subprocess import os import shutil from . import AA_stat, utils, io import argparse import logging import sys """ Created on Sun Jan 26 15:41:40 2020 @author: julia """ OS_PARAMS_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'open_search.params') logger = logging.getLogger(__name__) DICT_AA = { 'add_G_glycine' : 'G', 'add_A_alanine' : 'A', 'add_S_serine' : 'S', 'add_P_proline' : 'P', 'add_V_valine' : 'V', 'add_T_threonine' : 'T', 'add_C_cysteine' : 'C', 'add_L_leucine' : 'L', 'add_I_isoleucine' : 'I', 'add_N_asparagine' : 'N', 'add_D_aspartic_acid' : 'D', 'add_Q_glutamine' : 'Q', 'add_K_lysine' : 'K', 'add_E_glutamic_acid' : 'E', 'add_M_methionine' : 'M', 'add_H_histidine' : 'H', 'add_F_phenylalanine' : 'F', 'add_R_arginine' : 'R', 'add_Y_tyrosine' : 'Y', 'add_W_tryptophan' : 'W', 'add_Cterm_peptide' : 'C-term', 'add_Nterm_peptide' : 'N-term', } def main(): pars = argparse.ArgumentParser() pars.add_argument('--params', help='CFG file with parameters. If there is no file, AA_stat uses default one. ' 'An example can be found at https://github.com/SimpleNumber/aa_stat', required=False) pars.add_argument('--MSFragger', help='Path to MSFragger .jar file. ' 'If not specified, MSFRAGGER environment variable is used.') pars.add_argument('--dir', help='Directory to store the results. Default value is current directory.', default='.') pars.add_argument('-v', '--verbosity', type=int, choices=range(4), default=1, help='Output verbosity.') input_spectra = pars.add_mutually_exclusive_group(required=True) input_spectra.add_argument('--mgf', nargs='+', help='MGF files to search.', default=None) input_spectra.add_argument('--mzml', nargs='+', help='mzML files to search.', default=None) pars.add_argument('-db', '--fasta', help='FASTA file with decoys for open search. None: with included MSFragger parameters, ' 'the database is expected to contain decoys. Default decoy prefix is "rev_".' ' If it differs, do not forget to specify it in AA_stat params file.') pars.add_argument('--os-params', help='Custom open search parameters.') pars.add_argument('-x', '--optimize-fixed-mods', help='Run multiple searches, automatically determine which fixed modifications to apply.', action='store_true', default=False) pars.add_argument('-s', '--skip', help='Skip search if pepXML files exist already. If not specified, ' 'no steps are skipped. If specified without value, first step may be skipped. ' 'Value is number of steps to skip. 
Only works with "-x".', nargs='?', default=0, const=1, type=int) pars.add_argument('-je', '--java-executable', default='java') pars.add_argument('-ja', '--java-args', default='') args = pars.parse_args() levels = [logging.WARNING, logging.INFO, logging.DEBUG, utils.INTERNAL] logging.basicConfig(format='{levelname:>8}: {asctime} {message}', datefmt='[%H:%M:%S]', level=levels[args.verbosity], style='{') if not args.MSFragger: args.MSFragger = os.environ.get('MSFRAGGER') if not args.MSFragger: logger.critical('Please specify --MSFragger or set MSFRAGGER environment variable.') sys.exit(1) logger.info("Starting MSFragger and AA_stat pipeline.") spectra = args.mgf or args.mzml spectra = [os.path.abspath(i) for i in spectra] working_dir = args.dir if args.optimize_fixed_mods: logger.debug('Skipping up to %d steps.', args.skip) step = 1 fix_mod_dict = {} while True: logger.info('Starting step %d.', step) fig_data, aastat_table, locmod, data_dict, new_fix_mod_dict, var_mod_dict = run_step_os( spectra, 'os_step_{}'.format(step), working_dir, args, change_dict=fix_mod_dict, step=step) if new_fix_mod_dict: for k, v in new_fix_mod_dict.items(): fix_mod_dict.setdefault(k, 0.) fix_mod_dict[k] += data_dict[v][0] step += 1 else: break try: if os.path.isfile(os.path.join(working_dir, 'report.html')): logger.debug('Removing existing report.html.') os.remove(os.path.join(working_dir, 'report.html')) os.symlink(os.path.join('os_step_1', 'report.html'), os.path.join(working_dir, 'report.html')) except Exception as e: logger.debug('Can\'t create symlink to report: %s', e) else: logger.debug('Symlink created successfully.') logger.info('Stopping after %d steps.', step) else: logger.info('Running one-shot search.') folder_name = '' run_step_os(spectra, folder_name, working_dir, args) def get_pepxml(input_file, d=None): initial = os.path.splitext(input_file)[0] + '.pepXML' if d is None: return initial sdir, f = os.path.split(initial) return os.path.join(d, f) def run_os(java, jargs, spectra, msfragger, save_dir, parameters): command = [java] + jargs + ['-jar', msfragger, parameters, *spectra] logger.debug('Running command: %s', ' '.join(command)) retval = subprocess.call(command) logger.debug('Subprocess returned %s', retval) if retval: logger.critical('MSFragger returned non-zero code %s. 
Exiting.', retval) sys.exit(retval) os.makedirs(save_dir, exist_ok=True) for s in spectra: pepxml = get_pepxml(s) if os.path.normpath(os.path.dirname(pepxml)) != os.path.normpath(save_dir): logger.debug('Moving %s to %s', pepxml, save_dir) shutil.move(pepxml, get_pepxml(s, save_dir)) else: logger.debug('No need to move pepXML file.') def create_os_params(output, original=None, mass_shifts=None, fastafile=None): original = original or OS_PARAMS_DEFAULT with open(output, 'w') as new_params, open(original) as default: for line in default: key = line.split('=')[0].strip() if key == 'database_name' and fastafile: new_params.write('database_name = {}\n'.format(fastafile)) elif mass_shifts and DICT_AA.get(key) in mass_shifts: aa = DICT_AA[key] new_params.write(key + ' = ' + str(mass_shifts[aa]) + '\n') else: new_params.write(line) def run_step_os(spectra, folder_name, working_dir, args, change_dict=None, step=None): dir = os.path.abspath(os.path.join(working_dir, folder_name)) os.makedirs(dir, exist_ok=True) os_params_path = os.path.abspath(os.path.join(working_dir, folder_name, 'os.params')) create_os_params(os_params_path, args.os_params, change_dict, args.fasta) pepxml_names = [get_pepxml(s, dir) for s in spectra] run = True if step is not None: if step <= args.skip: run = not all(os.path.isfile(f) for f in pepxml_names) logger.debug('On step %d, need to run search: %s', step, run) else: logger.debug('Can\'t skip step %d, running.', step) if run: run_os(args.java_executable, args.java_args.split(), spectra, args.MSFragger, dir, os_params_path) else: logger.info('Skipping search.') args.pepxml = pepxml_names args.csv = None args.dir = dir params_dict = io.get_params_dict(args) return AA_stat.AA_stat(params_dict, args, step=step)
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/aa_search.py
aa_search.py
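An illustrative sketch of the parameter-file rewriting done by create_os_params in aa_search.py: copy a template MSFragger parameter file line by line, replacing the add_X_* fixed-modification entries with new modification masses. The file names, the abridged DICT_AA mapping and the example mass are placeholders, not AA_stat's actual defaults.

DICT_AA = {'add_C_cysteine': 'C', 'add_M_methionine': 'M'}  # abridged mapping

def write_params(template, output, mass_shifts):
    # mass_shifts maps residue label -> fixed modification mass (Da) to write
    with open(template) as src, open(output, 'w') as dst:
        for line in src:
            key = line.split('=')[0].strip()
            aa = DICT_AA.get(key)
            if aa in mass_shifts:
                dst.write('{} = {}\n'.format(key, mass_shifts[aa]))
            else:
                dst.write(line)

# e.g. fixed carbamidomethylation of cysteine:
# write_params('open_search.params', 'os.params', {'C': 57.02146})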
import argparse import logging import os from . import AA_stat, utils, io def main(): pars = argparse.ArgumentParser(epilog='Instead of file lists, you can pass directory names. ' 'This will process all files in the directory.') pars.add_argument('--params', help='CFG file with parameters. If there is no file, AA_stat uses default one. ' 'An example can be found at https://github.com/SimpleNumber/aa_stat', required=False) pars.add_argument('--dir', help='Directory to store the results. Default value is current directory.', default='.') pars.add_argument('-v', '--verbosity', type=int, choices=range(4), default=1, help='Output verbosity.') input_spectra = pars.add_mutually_exclusive_group() input_spectra.add_argument('--mgf', nargs='+', help='MGF files to localize modifications.') input_spectra.add_argument('--mzml', nargs='+', help='mzML files to localize modifications.') input_file = pars.add_mutually_exclusive_group(required=True) input_file.add_argument('--pepxml', nargs='+', help='List of input files in pepXML format.') input_file.add_argument('--csv', nargs='+', help='List of input files in CSV format.') pars.add_argument('--fmods', help='Fixed modifications specified in the search (needed with CSV input). ' 'Example: +57.0215 @ C, +229.1630 @ N-term') pars.add_argument('--vmods', help='Variable modifications specified in the search (needed with CSV input). ' 'Example: 15.9959 @ M, 42.0106 @ N-term') pars.add_argument('--enzyme', help='Enzyme specificity set in the search (needed with CSV input).') pars.add_argument('-n', '--processes', type=int, help='Maximum number of processes to use.') args = pars.parse_args() levels = [logging.WARNING, logging.INFO, logging.DEBUG, utils.INTERNAL] logging.basicConfig(format='{levelname:>8}: {asctime} {message}', datefmt='[%H:%M:%S]', level=levels[args.verbosity], style='{') # Performance optimizations as per https://docs.python.org/3/howto/logging.html#optimization logging._srcfile = None logging.logThreads = 0 logging.logProcesses = 0 logger = logging.getLogger(__name__) logging.getLogger('matplotlib').setLevel(logging.WARNING) logger.info('Starting...') io.resolve_filenames(args) total_inputs = len(args.csv or []) + len(args.pepxml or []) if not total_inputs: logger.error('No input files found.') return params_dict = io.get_params_dict(args) logger.debug(params_dict) os.makedirs(args.dir, exist_ok=True) AA_stat.AA_stat(params_dict, args) logger.info('Done.')
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/main.py
main.py
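A hedged example of driving the entry point defined in main.py programmatically by patching sys.argv before calling main(); the input file names, output directory and verbosity level are placeholders, and in practice the same flags would normally be passed on the command line.

import sys
from AA_stat import main as aa_main

sys.argv = ['AA_stat', '--pepxml', 'sample.pepXML',
            '--mzml', 'sample.mzML', '--dir', 'results', '-v', '2']
aa_main.main()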
import os import operator import logging import pandas as pd import numpy as np import warnings from collections import Counter import re import pathlib import itertools as it from pyteomics import parser, pepxml, mass logger = logging.getLogger(__name__) MASS_FORMAT = '{:+.4f}' UNIMOD = mass.Unimod(pathlib.Path(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'unimod.xml')).as_uri()) INTERNAL = 5 DIFF_C13 = mass.calculate_mass(formula='C[13]') - mass.calculate_mass(formula='C') H = mass.nist_mass['H+'][0][0] def internal(*args, **kwargs): """Emit log message with level INTERNAL, which is lower than DEBUG.""" logger.log(INTERNAL, *args, **kwargs) def mass_format(mass): return MASS_FORMAT.format(mass) def make_0mc_peptides(pep_list, rule): """b, y In silico cleaves all peptides with a given rule. Parameters ---------- pep_list : Iterable An iterable of peptides rule : str or compiled regex. Cleavage rule in pyteomics format. Returns ------- Set of fully cleaved peptides. """ out_set = set() for i in pep_list: out_set.update(parser.cleave(i, rule)) return out_set def fdr_filter_mass_shift(mass_shift, data, params_dict): shifts = params_dict['mass_shifts_column'] ms_shift = data.loc[np.abs(data[shifts] - mass_shift[1]) < mass_shift[2], shifts].mean() mask = np.abs(data[shifts] - mass_shift[1]) < 3 * mass_shift[2] internal('Mass shift %.3f +- 3 * %.3f', mass_shift[1], mass_shift[2]) data_slice = data.loc[mask].sort_values(by=[params_dict['score_column'], params_dict['spectrum_column']], ascending=params_dict['score_ascending']).drop_duplicates(subset=params_dict['peptides_column']) internal('%d peptide rows selected for filtering', data_slice.shape[0]) with warnings.catch_warnings(): warnings.simplefilter("ignore") df = pepxml.filter_df(data_slice, key=params_dict['score_column'], fdr=params_dict['FDR'], reverse=not params_dict['score_ascending'], correction=params_dict['FDR_correction'], is_decoy='is_decoy') internal('Filtered data for %s: %d rows', mass_shift, df.shape[0]) return ms_shift, df def group_specific_filtering(data, mass_shifts, params_dict): """ Selects window around found mass shift and filters using TDA. Window is defined as mean +- sigma. Parameters ---------- data : DataFrame DF with all open search data. mass_shifts: numpy array Output of utils.fit_peaks function (poptperr matrix). An array of Gauss fitted mass shift parameters and their tolerances. [[A, mean, sigma, A_error, mean_error, sigma_error],...] params_dict : dict Dict with paramenters for parsing csv file. `mass_shifts_column`, `FDR`, `FDR_correction`, `peptides_column` Returns ------- Dict with mass shifts (in str format) as key and values is a DF with filtered PSMs. """ logger.info('Performing group-wise FDR filtering...') out_data = {} for ind, ms in enumerate(mass_shifts): if ind != len(mass_shifts) - 1: diff = abs(ms[1] - mass_shifts[ind + 1][1]) width_sum = 3 * (ms[2] + mass_shifts[ind + 1][2]) if diff < width_sum: coef = width_sum / diff ms[2] /= coef mass_shifts[ind + 1][2] /= coef logger.debug('Mass shifts %.3f and %.3f are too close, dividing their sigma by %.4f', ms[1], mass_shifts[ind + 1][1], coef) shift, df = fdr_filter_mass_shift(ms, data, params_dict) if len(df) > 0: # shift = np.mean(df[shifts]) ###!!!!!!!mean of from fit!!!! out_data[mass_format(shift)] = (shift, df) logger.info('# of filtered mass shifts = %s', len(out_data)) return out_data def check_composition(peptide, aa_labels): ''' Checks composition of peptides. 
Parameters ---------- peptide: str Peptide sequence aa_labels: list list of acceptable aa. Returns ------- True if accebtable, False overwise. ''' return set(peptide) < set(aa_labels) def get_varmod_combinations(recommended_vmods, values, tolerance): logger.debug('Received recommended vmods: %s', recommended_vmods) counter = Counter(aa for aa, shift in recommended_vmods) eligible = {aa for aa, count in counter.items() if count >= 3} out = {} if eligible: for i, (aa, shift) in enumerate(recommended_vmods): if aa == 'isotope error' or aa not in eligible: continue candidates = [(aac, shiftc) for aac, shiftc in recommended_vmods if aac == aa and shiftc != shift] for c1, c2 in it.combinations(candidates, 2): if abs(values[c1[1]] + values[c2[1]] - values[shift]) <= tolerance: out[i] = (c1[1], c2[1]) return out def get_opposite_mods(fmods, rec_fmods, rec_vmods, values, tolerance): fmods = masses_to_mods(fmods) for aa, mod in rec_fmods.items(): if aa in fmods: fmods[aa] = fmods[aa] + values[mod] else: fmods[aa] = values[mod] logger.debug('Calculating opposites using effective fixed mod dict: %s', fmods) vmod_idx = [] for aaf, fmod in fmods.items(): for i, (aav, vmod) in enumerate(rec_vmods): if aaf == aav and abs(fmod + values[vmod]) < tolerance: vmod_idx.append(i) return vmod_idx def find_isotopes(ms, peptides_in_bin, tolerance=0.01): """ Find the isotopes between mass shifts using mass difference of C13 and C12, information of amino acids statistics as well. Paramenters ----------- ms : Series Series with mass in str format as index and values float mass shift. peptides_in_bin : Series Series with # of peptides in each mass shift. tolerance : float Tolerance for isotop matching. Returns ------- DataFrame with 'isotop'(boolean) and 'monoisotop_index' columns. """ out = pd.DataFrame({'isotope': False, 'monoisotop_index': None}, index=ms.index) np_ms = ms.to_numpy() difference_matrix = np.abs(np_ms.reshape(-1, 1) - np_ms.reshape(1, -1) - DIFF_C13) isotop, monoisotop = np.where(difference_matrix < tolerance) logger.debug('Found %d potential isotopes.', isotop.sum()) out.iloc[isotop, 0] = True out.iloc[isotop, 1] = out.iloc[monoisotop, :].index for i, row in out.iterrows(): if row['isotope']: if peptides_in_bin[i] > peptides_in_bin[row['monoisotop_index']]: out.at[i, 'isotope'], out.at[i, 'monoisotop_index'] = False, None return out def get_candidates_from_unimod(mass_shift, tolerance, unimod_df): """ Find modifications for `mass_shift` in Unimod.org database with a given `tolerance`. Paramenters ----------- mass_shift : float Modification mass in Da. tolerance : float Tolerance for the search in Unimod db. unimod_df : DataFrame DF with all unimod mo9difications. Returns ------- List of amino acids. """ ind = abs(unimod_df['mono_mass'] - mass_shift) < tolerance sites_set = set() accessions = set() for i, row in unimod_df.loc[ind].iterrows(): sites_set.update(s['site'] if s['position'][:3] == 'Any' else s['position'] for s in row['specificity']) accessions.add(row['record_id']) return sites_set, accessions def find_mod_sum(x, index, sum_matrix, tolerance): """ Finds mass shift that are sum of given mass shift and other mass shift results in already existing mass shift. Parameters ---------- x : float Mass shift that considered as a component of a modification. index : dict Map for mass shift indexes and their values. sum_matrix : numpy 2D array Matrix of sums for all mass shifts. tolerance: float Matching tolerance in Da. Returns ------- List of tuples. 
""" rows, cols = np.where(np.abs(sum_matrix - x) < tolerance) i = rows <= cols if rows.size: return list(zip(index[rows[i]], index[cols[i]])) return None def find_sums(ms, tolerance=0.005): """ Finds the sums of mass shifts in Series, if it exists. Parameters ---------- ms : Series Series with mass in str format as index and values float mass shift. tolerance : float Matching tolerance in Da. Returns ------- Series with pairs of mass shift for all mass shifts. """ zero = mass_format(0.0) if zero in ms.index: col = ms.drop(zero) else: col = ms logger.info('Zero mass shift not found in candidates.') values = col.values sum_matrix = values.reshape(-1, 1) + values.reshape(1, -1) out = col.apply(find_mod_sum, args=(col.index, sum_matrix, tolerance)) return out def apply_var_mods(seq, mods): parsed = parser.parse(seq) out = [] for i, aa in enumerate(parsed): if i in mods: out.append('{{{:+.0f}}}'.format(mods[i]) + aa) else: out.append(aa) seqout = ''.join(out) internal('%s + %s = %s', seq, mods, seqout) return seqout def get_column_with_mods(row, params_dict): peptide = params_dict['peptides_column'] mods = get_var_mods(row, params_dict) return apply_var_mods(row[peptide], mods) def format_isoform(row, params_dict): ms = row['mod_dict'] seq = row['top isoform'] pc, nc, mc = operator.itemgetter('prev_aa_column', 'next_aa_column', 'mods_column')(params_dict) prev_aa, next_aa = operator.itemgetter(pc, nc)(row) mods = get_var_mods(row, params_dict) seq = apply_var_mods(seq, mods) sequence = re.sub(r'([a-z])([A-Z])', lambda m: '{}[{:+.0f}]'.format(m.group(2), float(ms[m.group(1)])), seq) return '{}.{}.{}'.format(prev_aa[0], sequence, next_aa[0]) def get_fix_var_modifications(pepxml_file, labels): fout, vout = {}, [] p = pepxml.PepXML(pepxml_file, use_index=False) mod_list = list(p.iterfind('aminoacid_modification')) logger.debug('mod_list: %s', mod_list) p.reset() term_mods = list(p.iterfind('terminal_modification')) logger.debug('term_mods: %s', term_mods) p.close() for m in mod_list: if m['aminoacid'] not in labels: continue if 'peptide_terminus' in m: key = '{}-term {}'.format(m['peptide_terminus'].upper(), m['aminoacid']) else: key = m['aminoacid'] if m['variable'] == 'N': fout[key] = m['mass'] else: vout.append((key, m['massdiff'])) for m in term_mods: if m['variable'] == 'N': if m['terminus'] == 'N': fout['H-'] = m['mass'] else: fout['-OH'] = m['mass'] else: key = ('Protein ' if m.get('protein_terminus') == 'Y' else '') + m['terminus'] + '-term' vout.append((key, m['massdiff'])) return fout, vout def get_specificity(pepxml_file): with pepxml.PepXML(pepxml_file, use_index=False) as p: s = next(p.iterfind('specificity')) logger.debug('Extracted enzyme specificity: %s', s) return s def parse_l10n_site(site): aa, shift = site.split('_') return aa, shift def mass_to_mod(label, value, aa_mass=mass.std_aa_mass): words = label.split() if len(words) > 1: # terminal mod label = words[-1] return value - aa_mass.get(label, 0) def masses_to_mods(d, fix_mod=None): aa_mass = mass.std_aa_mass.copy() aa_mass['H-'] = 1.007825 aa_mass['-OH'] = 17.00274 if fix_mod: aa_mass.update(fix_mod) d = {k: mass_to_mod(k, v, aa_mass) for k, v in d.items()} if 'H-' in d: d['N-term'] = d.pop('H-') if '-OH' in d: d['C-term'] = d.pop('-OH') return d def get_var_mods(row, params_dict): # produce a dict for specific PSM: position (int) -> mass shift (float) modifications = row[params_dict['mods_column']] peptide = params_dict['peptides_column'] mass_dict_0 = mass.std_aa_mass.copy() mass_dict_0['H-'] = 1.007825 mass_dict_0['-OH'] = 
17.00274 mass_dict_0.update(params_dict['fix_mod']) mod_dict = {} if modifications: internal('Got modifications for peptide %s: %s', row[peptide], modifications) for m in modifications: # internal('Parsing modification: %s', m) mmass, pos = m.split('@') mmass = float(mmass) pos = int(pos) if pos == 0: key = 'H-' elif pos == len(row[peptide]) + 1: key = '-OH' else: key = row[peptide][pos-1] if abs(mmass - mass_dict_0[key]) > params_dict['frag_acc']: # utils.internal('%s modified in %s at position %s: %.3f -> %.3f', key, row[peptide], pos, mass_dict_0[key], mmass) mod_dict[pos] = mmass - mass_dict_0[key] if mod_dict: internal('Final mod dict: %s', mod_dict) return mod_dict def format_grouped_keys(items, params_dict): out = [] for k, td in items: if k[1:] == '-term': t = k[0] if isinstance(td, list): keys, values = zip(*td) diff = max(values) - min(values) label_condition = set(keys) >= set(params_dict['labels']) if diff < params_dict['prec_acc'] and label_condition: out.append((k, values[0])) # arbitrary amino acid, they all have the same modification logger.debug('Collapsing %s-terminal mods.', t) else: logger.debug('Not collapsing %s-term dict: diff in values is %.3f, set of labels condition is %ssatisfied', t, diff, '' if label_condition else 'not ') for aa, v in td: out.append((k + ' ' + aa, v)) else: out.append((k, td)) else: out.append((k, td)) logger.debug('Variable mods with grouped keys: %s', out) return out def group_terminal(items): grouped = [] tg = {} for k, v in items: prefix, protein, term, aa = re.match(r'((Protein)?(?: )?([NC]-term)?)(?: )?([A-Z])?', k).groups() if term is None or aa is None: grouped.append((k, v)) else: tg.setdefault(prefix, []).append((aa, v)) grouped.extend(tg.items()) logger.debug('Variable mods after grouping: %s', grouped) return grouped def format_mod_dict_str(d): if d: return ', '.join('{} @ {}'.format(v, k) for k, v in d.items()) return 'none' def format_mod_dict(d): if d: return ', '.join('{} @ {}'.format(mass_format(v), k) for k, v in d.items()) return 'none' def format_mod_list(items): if items: return ', '.join('{} @ {}'.format(v, k) for k, v in items) return 'none' def get_isotope_shift(label, locmod_df): isotope = locmod_df[locmod_df['isotope index'] == label] if not isotope.shape[0]: return return isotope[isotope['# peptides in bin'] == isotope['# peptides in bin'].max()].index[0] def format_localization_key(site, ms): if not isinstance(ms, str): ms = mass_format(ms) return site + '_' + ms def measured_mz_series(df, params_dict): return (df[params_dict['measured_mass_column']] + df[params_dict['charge_column']] * H ) / df[params_dict['charge_column']] def calculated_mz_series(df, params_dict): return (df[params_dict['calculated_mass_column']] + df[params_dict['charge_column']] * H ) / df[params_dict['charge_column']] def format_list(lst, sep1=', ', sep2=' or '): lst = list(lst) if not lst: return '' if len(lst) == 1: return lst[0] *most, last = lst return sep1.join(most) + sep2 + last def find_mass_shift(value, data_dict, tolerance): s = sorted(data_dict, key=lambda x: abs(value - data_dict[x][0])) if abs(data_dict[s[0]][0] - value) <= tolerance: return s[0] def loc_positions(top_isoform): return [i for i, a in enumerate(top_isoform, 1) if len(a) > 1] def choose_correct_massdiff(reported, calculated, params_dict): maxdiff = np.abs(reported - calculated).max() if maxdiff < params_dict['bin_width'] / 2: logger.debug('Maximum mass diff is within bounds: %.4f', maxdiff) return reported elif maxdiff < params_dict['prec_acc']: 
logger.warning('Reported mass shifts have a high calculation error (%.4f).' ' Using own calculations', maxdiff) return calculated else: logger.warning('Reported mass shifts differ from calculated values (up to %.4f).' ' Using the reported values. Consider reporting this to the developers.', maxdiff) return reported def convert_tandem_cleave_rule_to_regexp(cleavage_rule, params_dict): def get_sense(c_term_rule, n_term_rule): if '{' in c_term_rule: return 'N' elif '{' in n_term_rule: return 'C' else: if len(c_term_rule) <= len(n_term_rule): return 'C' else: return 'N' def get_cut(cut, no_cut): aminoacids = set(params_dict['labels']) cut = ''.join(aminoacids & set(cut)) if '{' in no_cut: no_cut = ''.join(aminoacids & set(no_cut)) return cut, no_cut else: no_cut = ''.join(set(params_dict['labels']) - set(no_cut)) return cut, no_cut protease = cleavage_rule.replace('X', ''.join(params_dict['labels'])) c_term_rule, n_term_rule = protease.split('|') sense = get_sense(c_term_rule, n_term_rule) if sense == 'C': cut, no_cut = get_cut(c_term_rule, n_term_rule) else: cut, no_cut = get_cut(n_term_rule, c_term_rule) return {'sense': sense, 'cut': cut, 'no_cut': no_cut} def parse_mod_list(s, kind): pairs = re.split(r'\s*[,;]\s*', s) if kind == 'fixed': out = {} elif kind == 'variable': out = [] else: raise ValueError('`kind` must be "fixed" or "variable", not "{}".'.format(kind)) for p in pairs: if p: m, aa = re.split(r'\s*@\s*', p) m = float(m) if kind == 'fixed': if aa == 'N-term': out['H-'] = 1.007825 + m elif aa == 'C-term': out['-OH'] = 17.00274 + m else: out[aa] = mass.std_aa_mass[aa] + m else: out.append((aa, m)) return out
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/utils.py
utils.py
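A standalone sketch of the isotope-matching idea behind find_isotopes in utils.py: mass shifts whose pairwise difference matches the C13-C12 mass difference within a tolerance are flagged as isotope peaks of another shift. The example shift values and the 0.01 Da tolerance are made up for illustration.

import numpy as np
from pyteomics import mass

DIFF_C13 = mass.calculate_mass(formula='C[13]') - mass.calculate_mass(formula='C')

shifts = np.array([0.0000, 0.9840, 1.0029, 15.9949, 16.9983])
# diff[i, j] compares shift i against shift j plus one C13-C12 spacing
diff = np.abs(shifts.reshape(-1, 1) - shifts.reshape(1, -1) - DIFF_C13)
isotope_idx, mono_idx = np.where(diff < 0.01)
for i, j in zip(isotope_idx, mono_idx):
    print('%.4f looks like a C13 isotope of %.4f' % (shifts[i], shifts[j]))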
import pandas as pd import numpy as np import os import sys from collections import defaultdict from scipy.stats import ttest_ind import logging import warnings from pyteomics import parser from . import utils, localization, html, io, stats, recommendations logger = logging.getLogger(__name__) def get_peptide_statistics(peptide_list): ''' Calculates presence of amino acid in peptide sequences. Parameters ---------- peptide_list : Iterable An iterable of peptides, that are already fully cleaved. Returns ------- out : dict with amino acids as a key and its persentage of peptides with it as a value. ''' sum_aa = 0 pep_set = set(peptide_list) d = defaultdict(int) for seq in pep_set: for let in set(seq): d[let] += 1 sum_aa += 1 for i in d: d[i] = int(100 * d[i] / sum_aa) return d def get_aa_distribution(peptide_list, rule): ''' Calculates amino acid statistics for peptide list. In silico cleaves peptides to get fully cleaved set of peptides. Parameters ---------- peptide_list : Iterable An iterable of peptides. rule : str or compiled regex. Cleavage rule in pyteomics format. Returns ------- out : dict with amino acids as a key and its persentage as a value. ''' sum_aa = 0 pep_set = utils.make_0mc_peptides(peptide_list, rule) d = defaultdict(int) for seq in pep_set: for let in seq: d[let] += 1 sum_aa += 1 for i in d: d[i] /= sum_aa return d def make_table(distributions, number_of_PSMs, mass_shifts, reference_label): ''' Prepares amino acid statistis result table. Parameters ---------- distributions : DataFrame Amino acids statistics, where indexes are amino acids, columns mass shifts (str) number_of_PSMs : Series Indexes are mass shifts (in str format) and values are numbers of filtered PSMs mass_shifts : dict Mass shift in str format (rounded) -> actual mass shift (float) Returns ------- A table with mass shifts, psms, amino acid statistics columns. ''' df = pd.DataFrame({'mass shift': [mass_shifts[k] for k in distributions.columns], '# peptides in bin': number_of_PSMs}, index=distributions.columns) df['# peptides in bin'] = df['# peptides in bin'].astype(np.int64) out = pd.concat([df, distributions.T], axis=1) out['is reference'] = df.index == reference_label return out def calculate_error_and_p_vals(pep_list, err_ref_df, reference, rule, aas): ''' Calculates p-values and error standard deviation of amino acids statistics using bootstraping method. Parameters ---------- pep_list : Iterable An iterable of peptides. err_ref_df : Series Indexes are amino acids and values are stds of a `reference` mass shift. reference : Series Indexes are amino acids and values are amino acids statistics of a reference mass shift. rule : str or compiled regex. Cleavage rule in pyteomics format. aas: Iterable An Iterable of amino acids to be considered. Returns ------- Series of p-values, std of amino acid statistics for considered `pep_list`. ''' d = pd.DataFrame(index=aas) for i in range(50): d[i] = pd.Series(get_aa_distribution( np.random.choice(np.array(pep_list), size=(len(pep_list) // 2), replace=False), rule)) / reference p_val = pd.Series() for i in aas: p_val[i] = ttest_ind(err_ref_df.loc[i, :], d.loc[i, :])[1] return p_val, d.std(axis=1) def get_zero_mass_shift(mass_shift_data_dict, params_dict): """ Shift of non-modified peak. Finds zero mass shift. Parameters ---------- mass_shift_data_dict : dict dict of mass shifts. params_dict: dict Returns ------- Mass shift label, Mass shift in float format. 
""" values = [v[0] for v in mass_shift_data_dict.values()] keys = list(mass_shift_data_dict.keys()) data = [v[1] for v in mass_shift_data_dict.values()] lref = np.argmin(np.abs(values)) maxbin = max(df.shape[0] for df in data) logger.debug('Closest to zero: %s, with %d peptides. Top mass shift has %d peptides.', keys[lref], data[lref].shape[0], maxbin) if abs(values[lref]) > params_dict['zero bin tolerance'] or data[lref].shape[0] / maxbin < params_dict['zero min intensity']: logger.warning('Too few unmodified peptides. Mass shift with most identifications will be the reference.') identifications = [df.shape[0] for df in data] lref = np.argmax(identifications) return keys[lref], values[lref] def check_difference(shift1, shift2, tolerance=0.05): """ Checks two mass shifts means to be closer than the sum of their std. Parameters ---------- shift1 : List list that describes mass shift. On the first position have to be mean of mass shift, on second position have to be std. shift2 : List list that describes mass shift. On the first position have to be mean of mass shift, on second position have to be std. tolerance : float Matching tolerance in Da. Returns ------- out : bool """ mean_diff = (shift1[1] - shift2[1]) ** 2 sigma_diff = (shift1[2] + shift2[2]) ** 2 res = mean_diff > sigma_diff if abs(shift1[1] - shift2[1]) < tolerance: res = False return res def filter_mass_shifts(results, tolerance=0.05): """ Merges close mass shifts. If difference between means of two mass shifts less than sum of sigmas, they are merged. Parameters ---------- results : numpy array Output of utils.fit_peaks function (poptperr matrix). An array of Gauss fitted mass shift parameters and their tolerances. [[A, mean, sigma, A_error, mean_error, sigma_error],...] tolerance : float Matching tolerance in Da. Returns ------- Updated poptperr matrix. """ logger.info('Discarding bad peaks...') temp = [] out = [] if not results.size: return [] if results.size == 1: return [results[0]] temp = [results[0]] for mass_shift in results[1:]: if check_difference(temp[-1], mass_shift, tolerance=tolerance): if len(temp) > 1: logger.info('Joined mass shifts %s', ['{:0.4f}'.format(x[1]) for x in temp]) out.append(max(temp, key=lambda x: x[0])) temp = [mass_shift] else: temp.append(mass_shift) out.append(max(temp, key=lambda x: x[0])) logger.info('Peaks for subsequent analysis: %s', len(out)) return out def calculate_statistics(mass_shifts_dict, reference_label, params_dict, args): """ Calculates amino acid statistics, relative amino acids presence in peptides for all mass shifts. Paramenters ----------- mass_shifts_dict : dict A dict with mass shifts (in str format) as key and values is a DF with filtered PSMs. zero_mass_shift : float Reference mass shift. params_dict : dict Dict with paramenters for parsing csv file. `labels`, `rule`, `peptides_column` and other params Returns ------- DF with amino acid statistics, Series with number of PSMs and dict of data for mass shift figures. 
""" logger.info('Calculating distributions...') labels = params_dict['labels'] rule = params_dict['rule'] expasy_rule = parser.expasy_rules.get(rule, rule) save_directory = args.dir peptides = params_dict['peptides_column'] reference_bin = mass_shifts_dict[reference_label][1] number_of_PSMs = dict() # pd.Series(index=list(mass_shifts_labels.keys()), dtype=int) reference = pd.Series(get_aa_distribution(reference_bin[peptides], expasy_rule)) reference.fillna(0, inplace=True) # bootstraping for errors and p values calculation in reference (zero) mass shift err_reference_df = pd.DataFrame(index=labels) for i in range(50): err_reference_df[i] = pd.Series(get_aa_distribution( np.random.choice(np.array(reference_bin[peptides]), size=(len(reference_bin) // 2), replace=False), expasy_rule)) / reference logger.info('Mass shifts:') distributions = pd.DataFrame(index=labels) p_values = pd.DataFrame(index=labels) figure_args = {} for ms_label, (ms, ms_df) in mass_shifts_dict.items(): aa_statistics = pd.Series(get_aa_distribution(ms_df[peptides], expasy_rule)) peptide_stat = pd.Series(get_peptide_statistics(ms_df[peptides]), index=labels) number_of_PSMs[ms_label] = len(ms_df) aa_statistics.fillna(0, inplace=True) distributions[ms_label] = aa_statistics / reference with warnings.catch_warnings(): warnings.simplefilter("ignore") p_vals, errors = calculate_error_and_p_vals(ms_df[peptides], err_reference_df, reference, expasy_rule, labels) # errors.fillna(0, inplace=True) p_values[ms_label] = p_vals distributions.fillna(0, inplace=True) figure_args[ms_label] = (len(ms_df), [distributions[ms_label], errors], peptide_stat.fillna(0)) logger.info('%s Da', ms_label) pout = p_values.T pout.fillna(0).to_csv(os.path.join(save_directory, 'p_values.csv'), index=False) return distributions, pd.Series(number_of_PSMs), figure_args def systematic_mass_shift_correction(mass_shifts_dict, mass_correction): """ Parameters ---------- mass_shifts_dict : dict A dict with in the format: `mass_shift_label`: `(mass_shift_value, filtered_peptide_dataframe)`. mass_correction: float Mass of reference (zero) mass shift, that should be moved to 0.0 Returns ------- out : dict Updated `mass_shifts_dict` """ out = {} for k, v in mass_shifts_dict.items(): corr_mass = v[0] - mass_correction out[utils.mass_format(corr_mass)] = (corr_mass, v[1]) return out def AA_stat(params_dict, args, step=None): """ Calculates all statistics, saves tables and pictures. 
""" save_directory = args.dir logger.debug('Fixed modifications: %s', params_dict['fix_mod']) logger.debug('Variable modifications: %s', params_dict['var_mod']) logger.info('Using fixed modifications: %s.', utils.format_mod_dict(utils.masses_to_mods(params_dict['fix_mod']))) logger.info('Variable modifications in search results: %s.', utils.format_mod_list(params_dict['var_mod'])) logger.debug('Enzyme specificity: %s', params_dict['enzyme']) data = io.read_input(args, params_dict) if data is None: sys.exit(1) popt_pvar = stats.fit_peaks(data, args, params_dict) # logger.debug('popt_pvar: %s', popt_pvar) final_mass_shifts = filter_mass_shifts(popt_pvar, tolerance=params_dict['shift_error'] * params_dict['bin_width']) # logger.debug('final_mass_shifts: %s', final_mass_shifts) mass_shift_data_dict = utils.group_specific_filtering(data, final_mass_shifts, params_dict) del data # logger.debug('mass_shift_data_dict: %s', mass_shift_data_dict) if not mass_shift_data_dict: html.render_html_report(None, mass_shift_data_dict, None, params_dict, {}, {}, {}, [], save_directory, [], step=step) return None, None, None, mass_shift_data_dict, {} reference_label, reference_mass_shift = get_zero_mass_shift(mass_shift_data_dict, params_dict) if abs(reference_mass_shift) < params_dict['zero bin tolerance']: logger.info('Systematic mass shift equals %s', reference_label) if params_dict['calibration'] != 'off': mass_shift_data_dict = systematic_mass_shift_correction(mass_shift_data_dict, reference_mass_shift) reference_mass_shift = 0.0 reference_label = utils.mass_format(0.0) else: logger.info('Leaving systematic shift in place (calibration disabled).') else: logger.info('Reference mass shift is %s', reference_label) ms_labels = {k: v[0] for k, v in mass_shift_data_dict.items()} logger.debug('Final shift labels: %s', ms_labels.keys()) distributions, number_of_PSMs, figure_data = calculate_statistics(mass_shift_data_dict, reference_label, params_dict, args) table = make_table(distributions, number_of_PSMs, ms_labels, reference_label) stats.summarizing_hist(table, save_directory) logger.info('Summary histogram saved.') # table.index = table['mass shift'].apply(utils.mass_format) table[['is isotope', 'isotope index']] = utils.find_isotopes( table['mass shift'], table['# peptides in bin'], tolerance=params_dict['prec_acc']) table.at[reference_label, 'is isotope'] = False table.at[reference_label, 'isotope index'] = None logger.debug('Isotopes:\n%s', table.loc[table['is isotope']]) u = utils.UNIMOD.mods unimod_df = pd.DataFrame(u) table['unimod candidates'], table['unimod accessions'] = zip(*table['mass shift'].apply( lambda x: utils.get_candidates_from_unimod(x, params_dict['prec_acc'], unimod_df))) table['sum of mass shifts'] = utils.find_sums(table.loc[~table['is isotope'], 'mass shift'], tolerance=params_dict['shift_error'] * params_dict['bin_width']) logger.debug('Sums of mass shifts:\n%s', table.loc[table['sum of mass shifts'].notna()]) table.to_csv(os.path.join(save_directory, 'aa_statistics_table.csv'), index=False) spectra_dict = io.read_spectra(args) if spectra_dict: if args.mgf: params_dict['mzml_files'] = False else: params_dict['mzml_files'] = True logger.info('Starting Localization using MS/MS spectra...') ms_labels = pd.Series(ms_labels) locmod_df = table[['mass shift', '# peptides in bin', 'is isotope', 'isotope index', 'sum of mass shifts', 'unimod candidates', 'unimod accessions']].copy() locmod_df['aa_stat candidates'] = localization.get_candidates_from_aastat( table, labels=params_dict['labels'], 
threshold=params_dict['candidate threshold']) if params_dict['use_all_loc']: logger.info('Localizaing all mass shifts on all amino acids. This may take some time.') locmod_df['all candidates'] = [set(parser.std_amino_acids)] * locmod_df.shape[0] else: locmod_df['all candidates'] = locmod_df.apply( lambda x: set(x['unimod candidates']) | set(x['aa_stat candidates']), axis=1) if params_dict['force_term_loc']: logger.debug('Adding terminal localizations for all mass shifts.') locmod_df['all candidates'] = locmod_df['all candidates'].apply(lambda x: x | {'N-term', 'C-term'}) for i in locmod_df.loc[locmod_df['is isotope']].index: locmod_df.at[i, 'all candidates'] = locmod_df.at[i, 'all candidates'].union( locmod_df.at[locmod_df.at[i, 'isotope index'], 'all candidates']) for i in locmod_df.index: ac = locmod_df.at[i, 'all candidates'] for term in ('N', 'C'): if 'Protein {}-term'.format(term) in ac and '{}-term'.format(term) in ac: ac.remove('Protein {}-term'.format(term)) logger.debug('Removing protein %s-term localization for %s as redundant.', term, i) if reference_mass_shift == 0.0: locmod_df.at[reference_label, 'all candidates'] = set() locmod_df['candidates for loc'] = localization.get_full_set_of_candidates(locmod_df) logger.info('Reference mass shift %s', reference_label) localization_dict = {} for ms_label, (ms, df) in mass_shift_data_dict.items(): localization_dict.update(localization.localization( df, ms, ms_label, locmod_df.at[ms_label, 'candidates for loc'], params_dict, spectra_dict, {k: v[0] for k, v in mass_shift_data_dict.items()})) locmod_df['localization'] = pd.Series(localization_dict).apply(dict) locmod_df.to_csv(os.path.join(save_directory, 'localization_statistics.csv'), index=False) if not locmod_df.at[reference_label, 'all candidates']: logger.debug('Explicitly writing out peptide table for reference mass shift.') df = mass_shift_data_dict[reference_label][1] io.save_df(reference_label, df, save_directory, params_dict) for reader in spectra_dict.values(): reader.close() else: locmod_df = None io.save_peptides(mass_shift_data_dict, save_directory, params_dict) logger.info('No spectrum files. 
MS/MS localization is not performed.') logger.info('Plotting mass shift figures...') for ms_label, data in figure_data.items(): if locmod_df is not None: localizations = locmod_df.at[ms_label, 'localization'] sumof = locmod_df.at[ms_label, 'sum of mass shifts'] else: localizations = None sumof = None stats.plot_figure(ms_label, *data, params_dict, save_directory, localizations, sumof) logger.info('AA_stat results saved to %s', os.path.abspath(args.dir)) utils.internal('Data dict: \n%s', mass_shift_data_dict) recommended_fix_mods = recommendations.determine_fixed_mods(figure_data, table, locmod_df, mass_shift_data_dict, params_dict) logger.debug('Recommended fixed mods: %s', recommended_fix_mods) if recommended_fix_mods: logger.info('Recommended fixed modifications: %s.', utils.format_mod_dict_str(recommended_fix_mods)) else: logger.info('Fixed modifications not recommended.') recommended_var_mods = recommendations.determine_var_mods( figure_data, table, locmod_df, mass_shift_data_dict, params_dict, recommended_fix_mods) logger.debug('Recommended variable mods: %s', recommended_var_mods) if recommended_var_mods: logger.info('Recommended variable modifications: %s.', utils.format_mod_list(recommended_var_mods)) else: logger.info('Variable modifications not recommended.') combinations = utils.get_varmod_combinations(recommended_var_mods, ms_labels, params_dict['prec_acc']) logger.debug('Found combinations in recommended variable mods: %s', combinations) opposite = utils.get_opposite_mods( params_dict['fix_mod'], recommended_fix_mods, recommended_var_mods, ms_labels, params_dict['prec_acc']) logger.debug('Opposite modifications: %s', utils.format_mod_list([recommended_var_mods[i] for i in opposite])) html.render_html_report(table, mass_shift_data_dict, locmod_df, params_dict, recommended_fix_mods, recommended_var_mods, combinations, opposite, save_directory, ms_labels, step=step) return figure_data, table, locmod_df, mass_shift_data_dict, recommended_fix_mods, recommended_var_mods
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/AA_stat.py
AA_stat.py
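For orientation, the systematic mass-shift correction applied in AA_stat.py above amounts to subtracting the reference ("zero") shift from every detected shift and relabeling the bins. A minimal sketch, assuming `mass_shift_data_dict` maps label -> (shift value, PSM DataFrame) as its usage above suggests; `mass_format` here is a hypothetical stand-in for `utils.mass_format`:

```
# Sketch of systematic mass-shift correction (illustration, not the package code).
def mass_format(mz):
    # hypothetical stand-in for utils.mass_format, matching labels like '+0.0000'
    return '{:+.4f}'.format(mz)


def systematic_correction_sketch(mass_shift_data_dict, reference_mass_shift):
    """Shift every bin by -reference_mass_shift and regenerate its label."""
    corrected = {}
    for label, (shift, psm_df) in mass_shift_data_dict.items():
        new_shift = shift - reference_mass_shift
        corrected[mass_format(new_shift)] = (new_shift, psm_df)
    return corrected
```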
from pyteomics.version import _VersionInfo

__version__ = '2.5.5'
version_info = _VersionInfo(__version__)
version = __version__
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/version.py
version.py
import matplotlib matplotlib.use('Agg') import pylab as plt from matplotlib.backends.backend_pdf import PdfPages import sys import ast import os import glob from configparser import ConfigParser import multiprocessing as mp from collections import defaultdict import logging import re import numpy as np import pandas as pd from pyteomics import pepxml, mgf, mzml from . import utils, stats AA_STAT_PARAMS_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'default.cfg') logger = logging.getLogger(__name__) def sanitize_df(df, params_dict): # drop unneeded columns column_keys = ['proteins_column', 'peptides_column', 'mass_shifts_column', 'score_column', 'measured_mass_column', 'calculated_mass_column', 'rt_column', 'next_aa_column', 'prev_aa_column', 'spectrum_column', 'charge_column', 'mods_column'] needed = {params_dict[k] for k in column_keys} to_drop = [c for c in df.columns if c not in needed] old_size = df.shape[1] df.drop(to_drop, axis=1, inplace=True) logger.debug('Kept %d and dropped %d out of %d initial columns.', df.shape[1], len(to_drop), old_size) # TODO: simplify and sanitize columns here return df def preprocess_df(df, filename, params_dict): ''' Preprocesses DataFrame. Parameters ---------- df: DataFrame Open search result df. filename: str Path of initial (pepxml or csv) file params_dict: dict Dict with all input parameters Returns ------- DataFrame ''' logger.debug('Preprocessing %s', filename) pp = PdfPages(os.path.join(params_dict['output directory'], os.path.basename(filename) + '.clustering.pdf')) window = params_dict['zero_window'] zero_bin = 0 shifts = params_dict['mass_shifts_column'] if not params_dict['decoy_prefix']: isdddict = {} for prefix in params_dict['decoy_prefix_list']: is_decoy = df[params_dict['proteins_column']].apply( lambda s: all(x.startswith(prefix) for x in s)) isd = is_decoy.sum() logger.debug('Trying prefix %s for %s... Found %d decoys.', prefix, filename, isd) isdddict[prefix] = isd prefix = max(isdddict, key=isdddict.get) logger.debug('Selected prefix %s for file %s (%d decoys)', prefix, filename, isdddict[prefix]) else: prefix = params_dict['decoy_prefix'] df['is_decoy'] = df[params_dict['proteins_column']].apply(lambda s: all(x.startswith(prefix) for x in s)) del df[params_dict['proteins_column']] if not df['is_decoy'].sum(): logger.error('No decoy IDs found in %s.', filename) if not params_dict['decoy_prefix']: logger.error('Configured decoy prefixes are: %s. Check you files or config.', ', '.join(params_dict['decoy_prefix_list'])) else: logger.error('Configured decoy prefix is: %s. Check your files or config.', prefix) return ms, filtered = utils.fdr_filter_mass_shift([None, zero_bin, window], df, params_dict) n = filtered.shape[0] logger.debug('%d filtered peptides near zero.', n) df[shifts] = utils.choose_correct_massdiff( df[shifts], df[params_dict['measured_mass_column']] - df[params_dict['calculated_mass_column']], params_dict) if params_dict['calibration'] == 'off': logger.info('Mass calibration is disabled. 
Skipping.') elif params_dict['calibration'] != 'simple': if n < params_dict['min_peptides_for_mass_calibration']: logger.warning('Skipping mass calibration: not enough peptides near zero mass shift.') else: to_fit, unit = stats.get_fittable_series(filtered, params_dict) # save copies of mass shift column, for use in boolean indexing shift_copy = df[shifts].copy() old_shifts = filtered[shifts].copy() if params_dict['clustering']: clustering = stats.clusters(filtered, to_fit, unit, filename, params_dict, pp) if clustering is None: filtered_clusters = None else: filtered_clusters = stats.filter_clusters(clustering, filtered, to_fit, params_dict) if not filtered_clusters: logger.info('Clustering was unsuccesful for %s. Calibrating masses in the whole file.', filename) elif len(filtered_clusters) == 1: logger.info('One large cluster found in %s. Calibrating masses in the whole file.', filename) filtered_clusters = None else: logger.info('Splitting %s into %d pieces.', filename, len(filtered_clusters)) f = plt.figure() for i in filtered_clusters: plt.hist(filtered.loc[to_fit.index].loc[clustering.labels_ == i, shifts], label=i, alpha=0.2, bins=25, density=True) plt.xlabel(shifts) plt.title('Before correction') plt.legend() pp.savefig(f) plt.close() else: filtered_clusters = None if not filtered_clusters: slices = [None] titles = ['Whole file'] assigned_masks = [slice(None)] filtered_clusters = ['<all>'] else: slices, titles = [], [] for i in filtered_clusters: slices.append(clustering.labels_ == i) titles.append('Cluster {}'.format(i)) assigned_masks = stats.get_cluster_masks(filtered_clusters, clustering, df, to_fit, params_dict) for c, slice_, title, mask in zip(filtered_clusters, slices, titles, assigned_masks): # logger.debug('Slice size for cluster %s is: %s', c, slice_.size if slice_ is not None else None) to_fit, unit = stats.get_fittable_series(filtered, params_dict, slice_) popt = stats._gauss_fit_slice(to_fit, unit, filename, title, params_dict, pp) if unit == 'Da': shift_copy.loc[mask] -= popt[1] elif unit == 'ppm': shift_copy.loc[mask] -= popt[1] * df[params_dict['calculated_mass_column']] / 1e6 else: freq_measured = 1e6 / np.sqrt(utils.measured_mz_series(df.loc[mask], params_dict)) - popt[1] mass_corrected = (((1e6 / freq_measured) ** 2) * df.loc[mask, params_dict['charge_column']] - utils.H * df.loc[mask, params_dict['charge_column']]) correction = mass_corrected - df.loc[mask, params_dict['measured_mass_column']] logger.debug('Average systematic mass shift for cluster %s: %f', c, -correction.mean()) shift_copy.loc[mask] += correction # corrected mass shifts are written back here df[shifts] = shift_copy filtered[shifts] = df.loc[filtered.index, shifts] f = plt.figure() floc = filtered.loc[old_shifts.abs() < params_dict['zero_window']] sc = plt.scatter(floc[shifts], floc[params_dict['rt_column']], c=clustering.labels_ if (params_dict['clustering'] and clustering) else 'k') if params_dict['clustering'] and clustering: plt.legend(*sc.legend_elements(), title='Clusters') plt.xlabel(shifts) plt.ylabel(params_dict['rt_column']) plt.title('After correction') pp.savefig(f) plt.close() if filtered_clusters != ['<all>']: f = plt.figure() for i in filtered_clusters: plt.hist(floc.loc[clustering.labels_ == i, shifts], label=i, alpha=0.2, bins=25, density=True) plt.xlabel(shifts) plt.legend() pp.savefig(f) plt.close() pp.close() df['file'] = os.path.splitext(os.path.basename(filename))[0] check_composition = df[params_dict['peptides_column']].apply(lambda x: utils.check_composition(x, 
params_dict['labels'])) del df[params_dict['measured_mass_column']] del df[params_dict['calculated_mass_column']] del df[params_dict['rt_column']] return df.loc[check_composition] def read_pepxml(fname, params_dict): ''' Reads pepxml file and preprocess it. Parameters ---------- fname: str Path to pepxml file params_dict: dict Dict with all input parameters Returns ------- DataFrame ''' logger.debug('Reading %s', fname) df = pepxml.DataFrame(fname, read_schema=False) return preprocess_df(sanitize_df(df, params_dict), fname, params_dict) def read_csv(fname, params_dict): """ Reads csv file. Paramenters ----------- fname : str Path to file name. params_dict : dict Dict with paramenters for parsing csv file. `csv_delimiter`, `proteins_column`, `proteins_delimiter` Returns ------- A DataFrame of csv file. """ # logger.info('Reading %s', fname) df = pd.read_csv(fname, sep=params_dict['csv_delimiter']) df[params_dict['mods_column']] = df[params_dict['mods_column']].apply(ast.literal_eval) protein = params_dict['proteins_column'] prev = params_dict['prev_aa_column'] next_ = params_dict['next_aa_column'] for c in [protein, prev, next_]: if (df[c].str[0] == '[').all() and (df[c].str[-1] == ']').all(): df[c] = df[c].apply(ast.literal_eval) else: df[c] = df[c].str.split(params_dict['proteins_delimeter']) return preprocess_df(sanitize_df(df, params_dict), fname, params_dict) def read_mgf(file_path): return mgf.IndexedMGF(file_path) def read_mzml(file_path): # write this return mzml.PreIndexedMzML(file_path) def read_spectra(args): readers = { 'mgf': read_mgf, 'mzml': read_mzml, } out_dict = {} for ftype, reader in readers.items(): spec_filenames = getattr(args, ftype) if spec_filenames: break else: return {} for inp in [args.pepxml, args.csv]: if inp: break if len(inp) != len(spec_filenames): logger.critical('Numbers of input files and spectrum files do not match (%d and %d).', len(inp), len(spec_filenames)) sys.exit(1) for i, filename in zip(inp, spec_filenames): name = os.path.splitext(os.path.basename(i))[0] out_dict[name] = reader(filename) return out_dict def read_input(args, params_dict): """ Reads open search output, assembles all data in one DataFrame. 
""" logger.info('Reading input files...') readers = { 'pepxml': read_pepxml, 'csv': read_csv, } nproc = params_dict['processes'] if nproc == 1: dfs = [] logger.debug('Reading files in one process.') for ftype, reader in readers.items(): filenames = getattr(args, ftype) logger.debug('Filenames [%s]: %s', ftype, filenames) if filenames: for filename in filenames: dfs.append(reader(filename, params_dict)) else: nfiles = 0 for ftype, reader in readers.items(): filenames = getattr(args, ftype) if filenames: nfiles += len(filenames) if nproc > 0: nproc = min(nproc, nfiles) else: nproc = min(nfiles, mp.cpu_count()) logger.debug('Reading files using %s processes.', nproc) pool = mp.Pool(nproc) results = [] for ftype, reader in readers.items(): filenames = getattr(args, ftype) logger.debug('Filenames [%s]: %s', ftype, filenames) if filenames: for filename in filenames: results.append(pool.apply_async(reader, args=(filename, params_dict))) dfs = [r.get() for r in results] pool.close() pool.join() if any(x is None for x in dfs): logger.critical('There were errors when reading input.') return logger.info('Starting analysis...') logger.debug('%d dfs collected.', len(dfs)) data = pd.concat(dfs, axis=0) data.index = range(len(data)) data['file'] = data['file'].astype('category') logger.debug('Memory usage:') logger.debug(data.memory_usage(deep=True)) return data def read_config_file(fname): params = ConfigParser(delimiters=('=', ':'), comment_prefixes=('#'), inline_comment_prefixes=('#')) params.read(AA_STAT_PARAMS_DEFAULT) if fname: if not os.path.isfile(fname): logger.error('Configuration file not found: %s', fname) else: params.read(fname) else: logger.info('Using default parameters for AA_stat.') return params def get_parameters(params): """ Reads paramenters from cfg file to one dict. Returns dict. 
""" params_dict = defaultdict() # data params_dict['decoy_prefix'] = params.get('data', 'decoy prefix') params_dict['decoy_prefix_list'] = re.split(r',\s*', params.get('data', 'decoy prefix list')) params_dict['FDR'] = params.getfloat('data', 'FDR') params_dict['labels'] = params.get('data', 'labels').strip().split() params_dict['rule'] = params.get('data', 'cleavage rule') # csv input params_dict['csv_delimiter'] = params.get('csv input', 'delimiter') params_dict['proteins_delimeter'] = params.get('csv input', 'proteins delimiter') params_dict['proteins_column'] = params.get('csv input', 'proteins column') params_dict['peptides_column'] = params.get('csv input', 'peptides column') params_dict['mass_shifts_column'] = params.get('csv input', 'mass shift column') params_dict['score_column'] = params.get('csv input', 'score column') params_dict['measured_mass_column'] = params.get('csv input', 'measured mass column') params_dict['calculated_mass_column'] = params.get('csv input', 'calculated mass column') params_dict['rt_column'] = params.get('csv input', 'retention time column') params_dict['next_aa_column'] = params.get('csv input', 'next aa column') params_dict['prev_aa_column'] = params.get('csv input', 'previous aa column') params_dict['spectrum_column'] = params.get('csv input', 'spectrum column') params_dict['charge_column'] = params.get('csv input', 'charge column') params_dict['mods_column'] = params.get('csv input', 'modifications column') params_dict['score_ascending'] = params.getboolean('csv input', 'score ascending') # general params_dict['bin_width'] = params.getfloat('general', 'width of bin in histogram') params_dict['so_range'] = tuple(float(x) for x in params.get('general', 'open search range').split(',')) params_dict['walking_window'] = params.getfloat('general', 'shifting window') params_dict['FDR_correction'] = params.getboolean('general', 'FDR correction') params_dict['processes'] = params.getint('general', 'processes') params_dict['zero_window'] = params.getfloat('general', 'zero peak window') params_dict['prec_acc'] = params.getfloat('general', 'mass shift tolerance') params_dict['zero bin tolerance'] = params.getfloat('general', 'zero shift mass tolerance') params_dict['zero min intensity'] = params.getfloat('general', 'zero shift minimum intensity') params_dict['min_peptides_for_mass_calibration'] = params.getint('general', 'minimum peptides for mass calibration') params_dict['specific_mass_shift_flag'] = params.getboolean('general', 'use specific mass shift window') params_dict['specific_window'] = [float(x) for x in params.get('general', 'specific mass shift window').split(',')] params_dict['figsize'] = tuple(float(x) for x in params.get('general', 'figure size in inches').split(',')) params_dict['calibration'] = params.get('general', 'mass calibration') params_dict['artefact_thresh'] = params.getfloat('general', 'artefact detection threshold') params_dict['html_truncate'] = params.getint('general', 'html info truncation length') #clustering params_dict['clustering'] = params.getboolean('clustering', 'use clustering') params_dict['eps_adjust'] = params.getfloat('clustering', 'dbscan eps factor') params_dict['min_samples'] = params.getint('clustering', 'dbscan min_samples') params_dict['clustered_pct_min'] = params.getfloat('clustering', 'total clustered peptide percentage minimum') params_dict['cluster_span_min'] = params.getfloat('clustering', 'cluster span percentage minimum') # fit params_dict['shift_error'] = params.getint('fit', 'shift error') 
params_dict['max_deviation_sigma'] = params.getfloat('fit', 'standard deviation threshold for sigma') params_dict['max_deviation_height'] = params.getfloat('fit', 'standard deviation threshold for height') params_dict['fit batch'] = params.getint('fit', 'batch') # localization params_dict['ion_types'] = tuple(params.get('localization', 'ion type').replace(' ', '').split(',')) params_dict['frag_acc'] = params.getfloat('localization', 'fragment ion mass tolerance') params_dict['candidate threshold'] = params.getfloat('localization', 'frequency threshold') params_dict['min_spec_matched'] = params.getint('localization', 'minimum matched peaks') params_dict['force_term_loc'] = params.getboolean('localization', 'always try terminal localization') params_dict['use_all_loc'] = params.getboolean('localization', 'try all localizations') # modifications params_dict['variable_mods'] = params.getint('modifications', 'recommend variable modifications') params_dict['multiple_mods'] = params.getboolean('modifications', 'recommend multiple modifications on residue') params_dict['fix_mod_zero_thresh'] = params.getfloat('modifications', 'fixed modification intensity threshold') params_dict['min_fix_mod_pep_count_factor'] = params.getfloat('modifications', 'peptide count factor threshold') params_dict['recommend isotope threshold'] = params.getfloat('modifications', 'isotope error abundance threshold') params_dict['min_loc_count'] = params.getint('modifications', 'minimum localization count') params_dict['fix_mod'] = utils.parse_mod_list(params.get('modifications', 'configured fixed modifications'), 'fixed') params_dict['var_mod'] = utils.parse_mod_list(params.get('modifications', 'configured variable modifications'), 'variable') return params_dict def set_additional_params(params_dict): if params_dict['specific_mass_shift_flag']: logger.info('Custom bin: %s', params_dict['specific_window']) params_dict['so_range'] = params_dict['specific_window'][:] elif params_dict['so_range'][1] - params_dict['so_range'][0] > params_dict['walking_window']: window = params_dict['walking_window'] / params_dict['bin_width'] else: window = (params_dict['so_range'][1] - params_dict['so_range']) / params_dict['bin_width'] if int(window) % 2 == 0: params_dict['window'] = int(window) + 1 else: params_dict['window'] = int(window) # should be odd params_dict['bins'] = np.arange(params_dict['so_range'][0], params_dict['so_range'][1] + params_dict['bin_width'], params_dict['bin_width']) _rule_to_enz = { 'trypsin': {'cut': 'KR', 'no_cut': 'P', 'sense': 'C'}, } def get_params_dict(args): logger.debug('Received args: %s', args) fname = args.params outdir = args.dir params = read_config_file(fname) params_dict = get_parameters(params) set_additional_params(params_dict) if args.processes is not None: params_dict['processes'] = args.processes params_dict['output directory'] = outdir if args.pepxml: fmod, vmod = utils.get_fix_var_modifications(args.pepxml[0], params_dict['labels']) params_dict['fix_mod'] = fmod params_dict['var_mod'] = utils.format_grouped_keys(utils.group_terminal(vmod), params_dict) params_dict['enzyme'] = utils.get_specificity(args.pepxml[0]) else: if args.fmods: if '@' in args.fmods: params_dict['fix_mod'] = utils.parse_mod_list(args.fmods, 'fixed') else: params_dict['fix_mod'] = ast.literal_eval(args.fmods) elif not params_dict['fix_mod']: logger.info('No fixed modifications specified. 
Use --fmods to configure them.') if args.vmods: if '@' in args.vmods: params_dict['var_mod'] = utils.parse_mod_list(args.vmods, 'variable') else: params_dict['var_mod'] = ast.literal_eval(args.vmods) elif not params_dict['var_mod']: logger.info('No variable modifications specified. Use --vmods to configure them.') if args.enzyme: if '|' in args.enzyme: params_dict['enzyme'] = utils.convert_tandem_cleave_rule_to_regexp(args.enzyme, params_dict) else: params_dict['enzyme'] = ast.literal_eval(args.enzyme) elif params_dict['rule'] in _rule_to_enz: params_dict['enzyme'] = _rule_to_enz[params_dict['rule']] logger.info('Using standard specificity for %s.', params_dict['rule']) else: logger.info('Enyzme not specified. Use --enzyme to configure.') params_dict['enzyme'] = None return params_dict _format_globs = { 'pepxml': ['*.pepXML', '*.pep.xml'], 'csv': ['*.csv'], 'mzml': ['*.mzML'], 'mgf': ['*.mgf'], } def resolve_filenames(args): for fformat, gs in _format_globs.items(): value = getattr(args, fformat) if value: logger.debug('Received %s list: %s', fformat, value) out = [] for val in value: if os.path.isdir(val): for g in gs: files = glob.glob(os.path.join(val, g)) logger.debug('Found %d files for glob %s in %s', len(files), g, val) out.extend(files) else: out.append(val) logger.debug('Final %s list: %s', fformat, out) setattr(args, fformat, out) def table_path(dir, ms): return os.path.join(dir, ms + '.csv') def save_df(ms, df, save_directory, params_dict): peptide = params_dict['peptides_column'] spectrum = params_dict['spectrum_column'] prev_aa = params_dict['prev_aa_column'] next_aa = params_dict['next_aa_column'] table = df[[peptide, spectrum]].copy() peptide1 = df.apply(utils.get_column_with_mods, axis=1, args=(params_dict,)) table[peptide] = df[prev_aa].str[0] + '.' + peptide1 + '.' + df[next_aa].str[0] with open(table_path(save_directory, ms), 'w') as out: table.to_csv(out, index=False, sep='\t') def save_peptides(data, save_directory, params_dict): for ms_label, (ms, df) in data.items(): save_df(ms_label, df, save_directory, params_dict)
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/io.py
io.py
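One detail worth calling out from io.py above is the decoy-prefix auto-detection in `preprocess_df`: for each configured prefix it counts the PSMs whose proteins all start with that prefix and keeps the prefix with the most hits. A stripped-down sketch under those assumptions (the helper name is made up; the column layout follows the code above):

```
import pandas as pd


def pick_decoy_prefix_sketch(df, protein_column, prefixes):
    # A PSM counts as a decoy only if *all* of its proteins carry the prefix,
    # mirroring the check in preprocess_df above.
    counts = {}
    for prefix in prefixes:
        is_decoy = df[protein_column].apply(
            lambda proteins: all(p.startswith(prefix) for p in proteins))
        counts[prefix] = int(is_decoy.sum())
    return max(counts, key=counts.get)


# usage sketch
psms = pd.DataFrame({'protein': [['DECOY_P1'], ['sp|P2'], ['DECOY_P3', 'DECOY_P4']]})
print(pick_decoy_prefix_sketch(psms, 'protein', ['DECOY_', 'rev_']))  # -> 'DECOY_'
```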
import unittest import numpy as np from .localization import get_theor_spectrum from .AA_stat import AA_stat from . import utils, io import argparse import logging from pyteomics import mass import os class AAstatTheorSpectrumTest(unittest.TestCase): def setUp(self): self.spec_PEPTIDE = { ('b', 1): np.array([98.06003647, 227.10262647, 324.15538647, 425.20306647, 538.28712647, 653.31406647]), ('y', 1): np.array([148.06043115, 263.08737115, 376.17143115, 477.21911115, 574.27187115, 703.31446115]), ('b', 2): np.array([49.53365647, 114.05495147, 162.58133147, 213.10517147, 269.64720147, 327.16067147]), ('y', 2): np.array([74.53385381, 132.04732381, 188.58935381, 239.11319381, 287.63957381, 352.16086881]) } self.spec_int_PEPTIDE = { 'b': {4953, 9806, 11405, 16258, 21310, 22710, 26964, 32415, 32716, 42520, 53828, 65331}, 'y': {26308, 23911, 47721, 28763, 18858, 35216, 37617, 57427, 13204, 14806, 70331, 7453} } self.spec_mPEPTIDE = { ('b', 1): np.array([114.05495147, 243.09754147, 340.15030147, 441.19798147, 554.28204147, 669.30898147]), ('y', 1): np.array([148.06043115, 263.08737115, 376.17143115, 477.21911115, 574.27187115, 703.31446115]), ('b', 2): np.array([57.53111397, 122.05240897, 170.57878897, 221.10262897, 277.64465897, 335.15812897]), ('y', 2): np.array([74.53385381, 132.04732381, 188.58935381, 239.11319381, 287.63957381, 352.16086881]) } self.spec_int_mPEPTIDE = { 'b': {17057, 55428, 33515, 11405, 12205, 66930, 27764, 24309, 44119, 5753, 22110, 34015}, 'y': {7453, 13204, 14806, 18858, 23911, 26308, 28763, 35216, 37617, 47721, 57427, 70331} } self.spec_PEPTIDE_cz = { ('c', 1): np.array([115.08658557, 244.12917557, 341.18193557, 442.22961557, 555.31367557, 670.34061557]), ('z', 1): np.array([131.03388205, 246.06082205, 359.14488205, 460.19256205, 557.24532205, 686.28791205]), ('c', 2): np.array([58.04693102, 122.56822602, 171.09460602, 221.61844602, 278.16047602, 335.67394602]), ('z', 2): np.array([66.02057926, 123.53404926, 180.07607926, 230.59991926, 279.12629926, 343.64759426]) } self.spec_int_PEPTIDE_cz = { 'c': {12256, 34118, 27816, 55531, 5804, 22161, 11508, 17109, 67034, 24412, 44222, 33567}, 'z': {12353, 46019, 27912, 35914, 6602, 55724, 13103, 23059, 68628, 18007, 34364, 24606} } def _compare_spectra(self, spec, spec_int, spec_true, spec_int_true, eps=1e-6): spec = {k: sorted(v) for k, v in spec.items()} self.assertEqual(spec.keys(), spec_true.keys()) for k in spec: spec[k].sort() # print(k) # print(spec[k]) # print(spec_true[k]) self.assertTrue(np.allclose(spec[k], spec_true[k], atol=eps)) self.assertEqual(spec_int, spec_int_true) def test_theor_spec_PEPTIDE(self): spec, spec_int = get_theor_spectrum(list('PEPTIDE'), 0.01, ion_types=('b', 'y'), maxcharge=2) self._compare_spectra(spec, spec_int, self.spec_PEPTIDE, self.spec_int_PEPTIDE) def test_theor_spec_PEvPTIDE(self): MOD = 15.994915 acc = 0.01 pos = 3 spec, spec_int = get_theor_spectrum(list('PEPTIDE'), acc, ion_types=('b', 'y'), maxcharge=2, modifications={pos: MOD}) spec_true = self.spec_PEPTIDE.copy() for k in spec_true: if k[0] == 'b': spec_true[k][pos - 1:] += MOD / k[1] else: spec_true[k][7 - pos:] += MOD / k[1] spec_int_true = {} for t in ('b', 'y'): spec_int_true[t] = {int(x / acc) for x in np.concatenate((spec_true[(t, 1)], spec_true[(t, 2)]))} self._compare_spectra(spec, spec_int, spec_true, spec_int_true) def test_theor_spec_vPEPTIDE(self): MOD = 15.994915 acc = 0.01 pos = 1 spec, spec_int = get_theor_spectrum(list('PEPTIDE'), acc, ion_types=('b', 'y'), maxcharge=2, modifications={pos: MOD}) spec_true = 
self.spec_PEPTIDE.copy() for k in spec_true: if k[0] == 'b': spec_true[k][pos - 1:] += MOD / k[1] else: spec_true[k][7 - pos:] += MOD / k[1] spec_int_true = {} for t in ('b', 'y'): spec_int_true[t] = {int(x / acc) for x in np.concatenate((spec_true[(t, 1)], spec_true[(t, 2)]))} self._compare_spectra(spec, spec_int, spec_true, spec_int_true) def test_theor_spec_PEvaPTIDE(self): MOD = 15.994915 acc = 0.01 pos = 3 aa_mass = mass.std_aa_mass.copy() aa_mass['aP'] = aa_mass['P'] + MOD peptide = list('PEPTIDE') peptide[pos - 1] = 'aP' spec, spec_int = get_theor_spectrum(peptide, acc, ion_types=('b', 'y'), maxcharge=2, modifications={pos: MOD}, aa_mass=aa_mass) spec_true = self.spec_PEPTIDE.copy() for k in spec_true: if k[0] == 'b': spec_true[k][pos - 1:] += 2 * MOD / k[1] else: spec_true[k][7 - pos:] += 2 * MOD / k[1] spec_int_true = {} for t in ('b', 'y'): spec_int_true[t] = {int(x / acc) for x in np.concatenate((spec_true[(t, 1)], spec_true[(t, 2)]))} self._compare_spectra(spec, spec_int, spec_true, spec_int_true) def test_theor_spec_mPEPTIDE(self): MOD = 15.994915 acc = 0.01 custom_mass = mass.std_aa_mass.copy() custom_mass['mP'] = mass.std_aa_mass['P'] + MOD spec, spec_int = get_theor_spectrum(['mP'] + list('EPTIDE'), acc, ion_types=('b', 'y'), maxcharge=2, aa_mass=custom_mass) self._compare_spectra(spec, spec_int, self.spec_mPEPTIDE, self.spec_int_mPEPTIDE) def test_theor_spec_PEPTIDE_cz(self): spec, spec_int = get_theor_spectrum(list('PEPTIDE'), 0.01, ion_types=('c', 'z'), maxcharge=2) self._compare_spectra(spec, spec_int, self.spec_PEPTIDE_cz, self.spec_int_PEPTIDE_cz) def test_theor_spec_termPEPTIDE(self): MOD = 42.12 acc = 0.01 custom_mass = mass.std_aa_mass.copy() custom_mass['H-'] = MOD + mass.nist_mass['H'][0][0] spec, spec_int = get_theor_spectrum(list('PEPTIDE'), acc, ion_types=('b', 'y'), maxcharge=2, aa_mass=custom_mass) spec_true = self.spec_PEPTIDE.copy() for k in spec_true: if k[0] == 'b': spec_true[k] += MOD / k[1] spec_int_true = self.spec_int_PEPTIDE.copy() spec_int_true['b'] = {int(x / acc) for x in np.concatenate((spec_true[('b', 1)], spec_true[('b', 2)]))} self._compare_spectra(spec, spec_int, spec_true, spec_int_true) def test_theor_spec_PEPTIDEterm(self): MOD = 42.12 acc = 0.01 custom_mass = mass.std_aa_mass.copy() custom_mass['-OH'] = MOD + mass.nist_mass['H'][0][0] + mass.nist_mass['O'][0][0] spec, spec_int = get_theor_spectrum(list('PEPTIDE'), acc, ion_types=('b', 'y'), maxcharge=2, aa_mass=custom_mass) spec_true = self.spec_PEPTIDE.copy() for k in spec_true: if k[0] == 'y': spec_true[k] += MOD / k[1] spec_int_true = self.spec_int_PEPTIDE.copy() spec_int_true['y'] = {int(x / acc) for x in np.concatenate((spec_true[('y', 1)], spec_true[('y', 2)]))} self._compare_spectra(spec, spec_int, spec_true, spec_int_true) class AAstatResultTest(unittest.TestCase): def setUp(self): self.maxDiff = None self.data_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test_data') self.pepxml = [os.path.join(self.data_dir, 'SDS_01_0{}.pepXML'.format(num)) for num in [1, 2]] self.mzml = [os.path.join(self.data_dir, 'SDS_01_0{}.mzML'.format(num)) for num in [1, 2]] verbosity = int(os.environ.get('AASTAT_VERBOSITY', '1')) levels = [logging.WARNING, logging.INFO, logging.DEBUG, utils.INTERNAL] logging.basicConfig(format='{levelname:>8}: {asctime} {message}', datefmt='[%H:%M:%S]', level=levels[verbosity], style='{') def test_aastat(self): if not os.path.isdir(self.data_dir): print('Test data not found, skipping integrative test.') return for f in self.pepxml + 
self.mzml: if not os.path.isfile(f): print(f, 'not found, skipping integrative test.') return args = argparse.Namespace(dir=self.data_dir, pepxml=self.pepxml, mzml=self.mzml, mgf=None, csv=None, params=None, processes=None) params_dict = io.get_params_dict(args) # params_dict['decoy_prefix'] = 'DECOY_' self.figure_data, self.table, self.locmod_df, self.mass_shift_data_dict, self.fix_mods, self.var_mods = AA_stat(params_dict, args) counts = [57, 179, 173, 540, 100, 82, 102, 279, 57, 67, 282, 52, 102, 66, 125, 60, 139, 145, 71, 2851, 341, 558, 103, 79, 148, 128, 71, 460, 171, 277, 54, 51, 106, 197, 61, 57, 341, 397, 108, 67, 158, 78] # print(self.table['# peptides in bin'].tolist()) print(self.table['# peptides in bin'].sum(), 'peptides found. The test has', sum(counts)) shifts = ['-246.1898', '-229.1630', '-203.1838', '-172.1413', '-171.1388', '-157.1416', '-147.1573', '-129.1469', '-116.0580', '-115.1202', '-114.1359', '-113.1334', '-100.1205', '-91.0091', '-72.1252', '-25.0314', '-18.0105', '-9.0368', '-2.0156', '+0.0000', '+0.9842', '+1.0029', '+1.9874', '+2.0048', '+13.9786', '+14.9845', '+15.0114', '+15.9951', '+16.9976', '+17.0269', '+18.0103', '+18.0282', '+30.9811', '+31.9893', '+32.9926', '+47.9847', '+52.9219', '+57.0217', '+58.0243', '+100.0160', '+229.1628', '+230.1650'] print(self.table.shape[0], 'mass shifts found. The test has', len(shifts)) # print(self.table.index.tolist()) self.assertEqual(self.table['# peptides in bin'].tolist(), counts) self.assertEqual(self.table.index.tolist(), shifts) self.assertEqual(self.fix_mods, {}) self.assertEqual(self.var_mods, [ ('isotope error', 1), ('N', '+0.9842'), ('K', '-114.1359'), ('M', '+15.9951'), ('K', '+57.0217'), ('C', '-9.0368'), ]) # print(self.locmod_df['localization'].tolist()) self.assertEqual(self.locmod_df['localization'].tolist(), [ {'N-term_-246.1898': 24, 'C_-246.1898': 26, 'P_-246.1898': 8, 'H_-246.1898': 4, 'non-localized': 7, 'Y_-246.1898': 7}, {'non-localized': 90, 'N-term_-246.1898': 3, 'C_-246.1898': 5, 'E_+17.0269': 13, 'P_-246.1898': 9, 'D_+17.0269': 13, 'H_-246.1898': 14, 'Y_-246.1898': 9, 'C-term_+17.0269': 5, 'K_+17.0269': 10, 'N-term_+17.0269': 7, 'R_+17.0269': 1}, {'H_-203.1838': 32, 'Y_-203.1838': 87, 'N-term_-203.1838': 9, 'non-localized': 4}, {}, {}, {'H_-157.1416': 17, 'N-term_-157.1416': 5}, {'H_-147.1573': 21, 'non-localized': 5, 'N-term_-147.1573': 3, 'D_-18.0105': 1, 'H_-129.1469': 1}, {'H_-129.1469': 43, 'N-term_-129.1469': 5}, {'C_-116.0580': 24, 'non-localized': 28, 'N-term_-116.0580': 1}, {'K_-115.1202': 62, 'C-term_-115.1202': 47}, {'K_-114.1359': 173, 'Q_+0.9842': 5, 'C-term_-115.1202': 8, 'K_-115.1202': 8, 'H_-114.1359': 14, 'C-term_-114.1359': 151, 'N_+0.9842': 2, 'R_+0.9842': 1, 'non-localized': 5, 'N-term_-114.1359': 5, 'D_+15.0114': 1, 'H_-129.1469': 3, 'I_+15.0114': 1, 'N-term_+15.0114': 1, 'E_+15.0114': 1}, {'K_-113.1334': 34, 'C-term_-113.1334': 27, 'N-term_-113.1334': 1, 'H_-113.1334': 9, 'non-localized': 2}, {'C-term_-100.1205': 74, 'K_-100.1205': 97, 'H_-157.1416': 2, 'E_+57.0217': 1, 'N-term_-100.1205': 2, 'G_+57.0217': 1}, {'non-localized': 1, 'C_-91.0091': 62}, {'K_-72.1252': 124, 'C-term_-72.1252': 92, 'N-term_-72.1252': 4}, {'C_-25.0314': 57, 'N-term_-25.0314': 3, 'Y_-25.0314': 1, 'non-localized': 1}, {'Y_-18.0105': 2, 'non-localized': 18, 'T_-18.0105': 44, 'S_-18.0105': 21, 'E_-18.0105': 16, 'D_-18.0105': 36, 'C_-18.0105': 1, 'N-term_-18.0105': 3, 'C-term_-18.0105': 1}, {'C_-9.0368': 125, 'non-localized': 10, 'C_-25.0314': 8, 'C_+15.9951': 1, 'G_+15.9951': 2, 'C-term_-9.0368': 
1, 'R_-9.0368': 2, 'N-term_-9.0368': 16, 'E_+15.9951': 1, 'I_+15.9951': 1, 'M_+15.9951': 1, 'N-term_+15.9951': 1, 'A_+15.9951': 1, 'C-term_+15.9951': 1, 'K_+15.9951': 1}, {'T_-18.0105': 3, 'D_+15.9951': 2, 'T_-2.0156': 1, 'C_-2.0156': 31, 'F_+15.9951': 1, 'Y_-18.0105': 1, 'L_+15.9951': 2, 'E_-18.0105': 3, 'non-localized': 10, 'C-term_-2.0156': 7, 'K_-2.0156': 7, 'S_-18.0105': 2, 'T_+15.9951': 1, 'S_-2.0156': 3, 'N-term_-2.0156': 2, 'V_-2.0156': 4, 'A_+15.9951': 3, 'C_-18.0105': 2, 'N-term_+15.9951': 1, 'Y_-2.0156': 2, 'K_+15.9951': 1, 'N-term_-18.0105': 1, 'D_-18.0105': 2, 'W_+15.9951': 1, 'M_+15.9951': 2}, {}, {'N_+0.9842': 232, 'Q_+0.9842': 73, 'C-term_+0.9842': 12, 'R_+0.9842': 15, 'non-localized': 5, 'N-term_+0.9842': 14}, {'T_+1.0029': 34, 'D_+1.0029': 34, 'V_+1.0029': 42, 'Y_+1.0029': 34, 'non-localized': 61, 'S_+1.0029': 38, 'C_+1.0029': 35, 'P_+1.0029': 44, 'E_+1.0029': 65, 'G_+1.0029': 34, 'L_+1.0029': 46, 'A_+1.0029': 21, 'I_+1.0029': 27, 'M_+1.0029': 9, 'F_+1.0029': 34, 'N-term_+1.0029': 1}, {'V_+1.9874': 9, 'C-term_+1.9874': 3, 'R_+1.9874': 3, 'Q_+1.9874': 15, 'N_+1.9874': 58, 'non-localized': 16, 'K_+1.9874': 1, 'W_+1.9874': 1}, {'non-localized': 31, 'D_+2.0048': 1, 'Y_+2.0048': 5, 'L_+2.0048': 6, 'F_+2.0048': 2, 'E_+2.0048': 3, 'V_+2.0048': 6, 'S_+2.0048': 3, 'C_+2.0048': 3, 'G_+2.0048': 4, 'P_+2.0048': 7, 'I_+2.0048': 2, 'T_+2.0048': 4, 'A_+2.0048': 2}, {'non-localized': 17, 'E_+13.9786': 9, 'T_-18.0105': 7, 'I_+31.9893': 1, 'P_+13.9786': 12, 'Q_+13.9786': 9, 'T_-2.0156': 3, 'W_+15.9951': 4, 'D_+15.9951': 2, 'Y_-2.0156': 3, 'V_+13.9786': 12, 'S_+13.9786': 6, 'I_+13.9786': 10, 'P_+31.9893': 2, 'N-term_+13.9786': 8, 'L_+13.9786': 19, 'W_+13.9786': 6, 'T_+13.9786': 6, 'C-term_+31.9893': 3, 'K_+31.9893': 2, 'C_+31.9893': 2, 'C-term_-2.0156': 6, 'K_-2.0156': 7, 'S_-18.0105': 2, 'W_+31.9893': 2, 'S_-2.0156': 3, 'V_+15.9951': 1, 'N-term_-2.0156': 1, 'Y_-18.0105': 1, 'R_+31.9893': 1, 'C-term_+13.9786': 1, 'R_+13.9786': 2, 'F_+15.9951': 1, 'C_+15.9951': 1, 'A_+13.9786': 9, 'E_-18.0105': 2, 'N-term_+15.9951': 2, 'L_+15.9951': 1, 'L_+31.9893': 1, 'Y_+31.9893': 1, 'G_+15.9951': 1, 'C_-2.0156': 1, 'A_+15.9951': 1, 'Y_+15.9951': 3, 'T_+15.9951': 1, 'M_+15.9951': 1, 'V_-2.0156': 1, 'D_-18.0105': 1, 'V_+31.9893': 1, 'Q_+15.9951': 1}, {'T_+14.9845': 9, 'W_+14.9845': 7, 'M_+14.9845': 14, 'non-localized': 31, 'S_+14.9845': 6, 'A_+14.9845': 5, 'P_+14.9845': 9, 'V_+14.9845': 9, 'E_+14.9845': 10, 'L_+14.9845': 17, 'Q_+14.9845': 3, 'N-term_+14.9845': 7, 'I_+14.9845': 6, 'C-term_+14.9845': 2, 'R_+14.9845': 2}, {'non-localized': 12, 'D_+15.0114': 5, 'M_+15.0114': 32, 'E_+15.0114': 8, 'Y_+15.0114': 4, 'S_-2.0156': 1, 'C-term_+17.0269': 4, 'R_+17.0269': 1, 'N-term_+15.0114': 3, 'D_+17.0269': 2, 'V_-2.0156': 2, 'C_-2.0156': 2, 'T_-2.0156': 1, 'K_+17.0269': 3, 'I_+15.0114': 1, 'L_+15.0114': 3}, {'A_+15.9951': 10, 'H_+15.9951': 1, 'non-localized': 61, 'M_+15.9951': 170, 'L_+15.9951': 15, 'P_+15.9951': 11, 'V_+15.9951': 9, 'T_+15.9951': 10, 'W_+15.9951': 24, 'E_+15.9951': 11, 'Q_+15.9951': 6, 'S_+15.9951': 9, 'C_+15.9951': 41, 'Y_+15.9951': 36, 'Q_+0.9842': 4, 'M_+15.0114': 3, 'G_+15.9951': 4, 'N-term_+15.9951': 20, 'N_+0.9842': 3, 'F_+15.9951': 11, 'I_+15.9951': 4, 'K_+15.9951': 8, 'C-term_+15.9951': 6, 'D_+15.9951': 4, 'I_+15.0114': 1, 'N_+15.9951': 7, 'N-term_+15.0114': 1, 'L_+15.0114': 3, 'E_+15.0114': 1, 'C-term_+0.9842': 1, 'R_+0.9842': 1}, {'non-localized': 85, 'G_+16.9976': 1, 'A_+16.9976': 7, 'V_+16.9976': 9, 'F_+16.9976': 3, 'S_+16.9976': 3, 'L_+16.9976': 4, 'Y_+16.9976': 5, 'I_+16.9976': 3, 
'P_+16.9976': 4, 'M_+16.9976': 24, 'N_+16.9976': 3, 'C_+16.9976': 8, 'W_+16.9976': 6, 'T_+16.9976': 1, 'C-term_+16.9976': 1, 'K_+16.9976': 1, 'E_+16.9976': 3, 'N-term_+16.9976': 1}, {'C-term_+17.0269': 62, 'K_+17.0269': 46, 'R_+17.0269': 11, 'D_+17.0269': 95, 'E_+17.0269': 110, 'N-term_+17.0269': 5, 'non-localized': 10, 'P_+17.0269': 1, 'L_+17.0269': 1, 'V_+17.0269': 1, 'A_+17.0269': 1}, {'A_+18.0103': 1, 'S_+18.0103': 1, 'non-localized': 42, 'L_+18.0103': 2, 'W_+18.0103': 1, 'E_+18.0103': 2, 'N_+18.0103': 2, 'C_+18.0103': 1, 'I_+18.0103': 1, 'P_+18.0103': 1}, {'non-localized': 10, 'C_+18.0282': 14, 'D_+18.0282': 9, 'N-term_+18.0282': 1, 'Y_+18.0282': 1, 'E_+18.0282': 13, 'C-term_+18.0282': 4, 'R_+18.0282': 2, 'K_+18.0282': 1, 'P_+18.0282': 1}, {'W_+30.9811': 16, 'N-term_+30.9811': 1}, {'non-localized': 59, 'V_+31.9893': 3, 'P_+31.9893': 8, 'W_+31.9893': 35, 'E_+31.9893': 11, 'Y_+31.9893': 19, 'I_+31.9893': 5, 'Y_-25.0314': 3, 'G_+57.0217': 1, 'C-term_+31.9893': 17, 'K_+31.9893': 11, 'C_+31.9893': 6, 'L_+31.9893': 12, 'F_+31.9893': 14, 'R_+31.9893': 6, 'C_-25.0314': 1, 'M_+57.0217': 1, 'C_+57.0217': 1, 'H_+57.0217': 1, 'N-term_+31.9893': 11, 'M_+31.9893': 3}, {'non-localized': 33, 'Y_+32.9926': 4, 'E_+32.9926': 4, 'P_+32.9926': 3, 'W_+32.9926': 8, 'L_+32.9926': 2, 'M_+32.9926': 2, 'F_+32.9926': 1, 'I_+32.9926': 2, 'V_+32.9926': 1, 'C_+32.9926': 1}, {'I_+31.9893': 2, 'V_+15.9951': 3, 'non-localized': 18, 'E_+31.9893': 1, 'N_+15.9951': 1, 'W_+47.9847': 10, 'W_+31.9893': 15, 'C_+15.9951': 5, 'M_+15.9951': 2, 'F_+47.9847': 1, 'S_+15.9951': 1, 'E_+15.9951': 3, 'A_+15.9951': 1, 'Y_+47.9847': 8, 'C_+47.9847': 1, 'H_+57.0217': 1, 'C-term_-9.0368': 1, 'R_-9.0368': 1, 'N-term_+31.9893': 1, 'T_+15.9951': 1, 'W_+15.9951': 1}, {}, {'non-localized': 50, 'M_+57.0217': 39, 'S_+57.0217': 8, 'D_+57.0217': 3, 'T_+57.0217': 10, 'E_+57.0217': 12, 'C_+57.0217': 6, 'H_+57.0217': 37, 'G_+57.0217': 14, 'Y_+57.0217': 62, 'A_+57.0217': 14, 'C-term_+57.0217': 128, 'K_+57.0217': 141, 'N-term_+57.0217': 11}, {'E_+58.0243': 4, 'D_+58.0243': 3, 'Y_+58.0243': 14, 'non-localized': 43, 'M_+58.0243': 6, 'T_+58.0243': 6, 'A_+58.0243': 2, 'H_+58.0243': 11, 'Q_+58.0243': 3, 'N-term_+58.0243': 3, 'K_+58.0243': 11, 'S_+58.0243': 2, 'G_+58.0243': 2, 'L_+58.0243': 1, 'C-term_+58.0243': 5}, {'non-localized': 11, 'S_+100.0160': 49, 'T_+229.1628': 2, 'H_-129.1469': 5, 'N-term_+100.0160': 4, 'T_+100.0160': 2, 'S_+229.1628': 3, 'N-term_-129.1469': 1, 'N-term_+229.1628': 1}, {'T_+229.1628': 38, 'S_+229.1628': 83, 'N-term_+229.1628': 12, 'C-term_+229.1628': 3, 'K_+229.1628': 3, 'non-localized': 14, 'H_+229.1628': 12, 'Q_+229.1628': 1, 'G_+229.1628': 2, 'A_+229.1628': 1, 'D_+229.1628': 1, 'E_+229.1628': 1, 'F_+229.1628': 1, 'V_+229.1628': 1}, {'T_+230.1650': 15, 'N-term_+230.1650': 5, 'non-localized': 20, 'E_+230.1650': 2, 'S_+230.1650': 36, 'C-term_+230.1650': 1, 'K_+230.1650': 1, 'H_+230.1650': 4} ] )
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/tests.py
tests.py
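The integration test above doubles as a usage recipe: AA_stat can be driven programmatically with an `argparse.Namespace`. A minimal sketch mirroring `AAstatResultTest` (file and directory paths are placeholders to replace with real data):

```
import argparse

from AA_stat import io
from AA_stat.AA_stat import AA_stat

args = argparse.Namespace(
    dir='results',                  # output directory (placeholder)
    pepxml=['sample_01.pepXML'],    # open search results (placeholder)
    mzml=['sample_01.mzML'],        # optional spectra for MS/MS localization
    mgf=None, csv=None, params=None, processes=None)

params_dict = io.get_params_dict(args)
figure_data, table, locmod_df, shift_data, fix_mods, var_mods = AA_stat(params_dict, args)
print(table[['mass shift', '# peptides in bin']])
```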
import logging import logging.handlers import socketserver import struct import pickle import socket import tkinter as tk class LoggingToGUI(logging.Handler): # https://stackoverflow.com/a/18194597/1258041 def __init__(self, console): logging.Handler.__init__(self) self.console = console def emit(self, message): formattedMessage = self.format(message) self.console.configure(state=tk.NORMAL) self.console.insert(tk.END, formattedMessage + '\n') self.console.configure(state=tk.DISABLED) self.console.see(tk.END) class LogRecordStreamHandler(socketserver.StreamRequestHandler): """Handler for a streaming logging request.""" def __init__(self, *args, **kwargs): socketserver.StreamRequestHandler.__init__(self, *args, **kwargs) def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while True: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack('>L', chunk)[0] chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = self.unPickle(chunk) record = logging.makeLogRecord(obj) self.handleLogRecord(record) def unPickle(self, data): return pickle.loads(data) def handleLogRecord(self, record): self._record_handler.handle(record) class LogRecordSocketReceiver(socketserver.ThreadingTCPServer): """ Simple TCP socket-based logging receiver suitable for testing. """ allow_reuse_address = True daemon_threads = True def __init__(self, host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT, handler=LogRecordStreamHandler): socketserver.ThreadingTCPServer.__init__(self, (host, port), handler) self.abort = 0 self.timeout = 1 self.logname = None def serve_until_stopped(self): import select abort = 0 while not abort: rd, wr, ex = select.select([self.socket.fileno()], [], [], self.timeout) if rd: self.handle_request() abort = self.abort tcpserver = None def _socket_listener_worker(logger, port, handler): global tcpserver try: tcpserver = LogRecordSocketReceiver(port=port, handler=handler) except socket.error as e: logger.error('Couldn\'t start TCP server: %s', e) return if port == 0: port = tcpserver.socket.getsockname()[1] tcpserver.serve_until_stopped() def get_logger(): logger = logging.getLogger('') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.DEBUG) logger.addHandler(stream_handler) formatter = logging.Formatter('{levelname:>8}: {asctime} {message}', datefmt='[%H:%M:%S]', style='{') stream_handler.setFormatter(formatter) logging.getLogger('matplotlib').setLevel(logging.WARNING) tcpHandler = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT) tcpHandler.setLevel(logging.INFO) logging.getLogger('AA_stat').addHandler(tcpHandler) return logger def get_aastat_handler(log_txt): class AAstatHandler(LogRecordStreamHandler): def __init__(self, *args, **kwargs): self._record_handler = LoggingToGUI(log_txt) formatter = logging.Formatter('{levelname:>8}: {asctime} {message}', datefmt='[%H:%M:%S]', style='{') self._record_handler.setFormatter(formatter) super().__init__(*args, **kwargs) return AAstatHandler
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/gui/logging.py
logging.py
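The receiver above consumes exactly what `logging.handlers.SocketHandler` emits: a 4-byte big-endian length followed by a pickled LogRecord. `get_logger()` already wires this up for the 'AA_stat' logger; a standalone sender sketch, using the same port and logger name, looks like this (illustration only, not part of the package):

```
import logging
import logging.handlers

# Anything logged to the 'AA_stat' logger is pickled and streamed to the
# TCP receiver started by _socket_listener_worker above.
sock_handler = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
logger = logging.getLogger('AA_stat')
logger.setLevel(logging.INFO)
logger.addHandler(sock_handler)

logger.info('This record travels as a 4-byte length plus a pickled LogRecord.')
```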
import tkinter as tk from tkinter.scrolledtext import ScrolledText from tkinter.filedialog import askopenfilenames, askopenfilename, askdirectory, asksaveasfilename from functools import partial import os import threading import sys import logging import logging.handlers import pathlib import webbrowser import tempfile from idlelib.tooltip import Hovertip from . import logging as logutils from .shortcut import create_shortcut from ..version import version from .. import AA_stat, io AA_STAT_VERSION = version INPUT_FILES = [] INPUT_SPECTRA = [] OUTDIR = '.' PARAMS = None PARAMS_TMP = None logger = logutils.get_logger() class Args: """Emulates parsed args from argparse for AA_stat""" pepxml = mgf = mzml = csv = None params = PARAMS dir = '.' verbosity = 1 def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def get_input_filenames(label, activate): fnames = askopenfilenames(title='Open search results', filetypes=[('pepXML file', '*.pepXML'), ('XML file', '*.pep.xml'), ('CSV file', '*.[ct]sv')], multiple=True) if fnames: INPUT_FILES[:] = fnames label['text'] = f'{len(fnames)} open search files selected.' #+ '\n'.join(os.path.basename(f) for f in fnames) activate['state'] = tk.NORMAL def get_spectrum_filenames(label): fnames = askopenfilenames(title='Spectrum files', filetypes=[('mzML file', '*.mzML'), ('MGF file', '*.mgf')], multiple=True) if fnames: INPUT_SPECTRA[:] = fnames label['text'] = f'{len(fnames)} spectrum files selected.' # + '\n'.join(os.path.basename(f) for f in fnames) def get_outdir_name(label): global OUTDIR dirname = askdirectory(title='Output directory') if dirname: OUTDIR = dirname label['text'] = 'Output directory: ' + os.path.abspath(dirname) def get_params(label): global PARAMS PARAMS = askopenfilename(title='Parameters file', filetypes=[('Config files', '*.cfg'), ('INI files', '*.ini'), ('Text files', '*.txt'), ('All files', '*.*')]) label['text'] = "Loaded parameters: " + PARAMS def _save_params(txt, fname): global PARAMS PARAMS = fname with open(fname, 'w') as f: f.write(txt.get('1.0', tk.END)) def save_params(txt, writeback): global PARAMS_TMP if PARAMS is None: PARAMS_TMP = params = tempfile.NamedTemporaryFile(delete=False, suffix='.cfg').name logger.debug('Saving params to a temporary file: %s', params) writeback['text'] = "Using temporary parameters." else: PARAMS_TMP = None params = PARAMS logger.debug('Saving params to file: %s', params) writeback['text'] = "Using edited file: " + PARAMS _save_params(txt, params) def save_params_as(txt, writeback): global PARAMS PARAMS = asksaveasfilename(title='Save params as...') save_params(txt, writeback) def edit_params(w, writeback): window = tk.Toplevel(w) window.title('AA_stat GUI: edit parameters') window.geometry('900x600') params_txt = tk.Text(window) params = PARAMS or io.AA_STAT_PARAMS_DEFAULT with open(params) as f: for line in f: params_txt.insert(tk.END, line) params_txt.pack(fill=tk.BOTH, expand=True) save_frame = tk.Frame(window) save_btn = tk.Button(save_frame, text="Save", command=partial(save_params, params_txt, writeback)) save_btn.pack(side=tk.LEFT) save_as_btn = tk.Button(save_frame, text="Save As...", command=partial(save_params_as, params_txt, writeback)) save_as_btn.pack(side=tk.LEFT) save_frame.pack() def get_aa_stat_version(): if AA_STAT_VERSION: return 'AA_stat v' + AA_STAT_VERSION else: return 'AA_stat not installed.' 
def get_aa_stat_args(): pepxml, csv = [], [] for f in INPUT_FILES: ext = os.path.splitext(f)[1].lower() if ext in {'.pepxml', '.xml'}: pepxml.append(f) else: csv.append(f) mzml, mgf = [], [] for f in INPUT_SPECTRA: ext = os.path.splitext(f)[1].lower() if ext == '.mzml': mzml.append(f) else: mgf.append(f) args = Args(pepxml=pepxml, mgf=mgf, csv=csv, mzml=mzml, dir=OUTDIR, params=PARAMS) params_dict = io.get_params_dict(args) return args, params_dict def start_aastat(t): t.start() def run_aastat(run_btn, status_to, log_to): run_btn['state'] = tk.DISABLED status_to['text'] = 'Checking arguments...' args, params_dict = get_aa_stat_args() status_to['text'] = 'Running AA_stat...' AA_stat.AA_stat(params_dict, args) status_to['text'] = 'Done.' run_btn['state'] = tk.NORMAL run_btn['text'] = 'View report' run_btn['command'] = partial(view_report, run_btn) def view_report(btn): url = (pathlib.Path(os.path.abspath(OUTDIR)) / 'report.html').as_uri() webbrowser.open(url) def main(): if len(sys.argv) == 2 and sys.argv[1] == '--create-shortcut': create_shortcut() return window = tk.Tk() window.title('AA_stat GUI') window.geometry('900x600') try: try: window.tk.call('tk_getOpenFile', '-foobarbaz') except tk.TclError: pass window.tk.call('set', '::tk::dialog::file::showHiddenBtn', '1') window.tk.call('set', '::tk::dialog::file::showHiddenVar', '0') except: pass top_frame = tk.Frame() input_frame = tk.Frame(master=top_frame) spectra_frame = tk.Frame(master=top_frame) selected_spectra_lbl = tk.Label(master=spectra_frame, text="(optional)", justify='left') get_spectra_btn = tk.Button(master=spectra_frame, text="Select mzML or MGF files", command=partial(get_spectrum_filenames, selected_spectra_lbl), width=20) spectra_tip_text = ("If you provide original mzML or MGF files,\n" "AA_stat will perform MS/MS-based localization of mass shifts\nand recommend variable modifications.") Hovertip(spectra_frame, text=spectra_tip_text) get_spectra_btn.pack(side=tk.LEFT, anchor=tk.E) selected_spectra_lbl.pack(side=tk.LEFT, padx=15, anchor=tk.W) dir_frame = tk.Frame(master=top_frame) dir_lbl = tk.Label(master=dir_frame, text="Output directory: " + os.path.abspath(OUTDIR), justify='left') get_dir_btn = tk.Button(master=dir_frame, text="Select output directory", command=partial(get_outdir_name, dir_lbl), width=20) get_dir_btn.pack(side=tk.LEFT, anchor=tk.E) dir_lbl.pack(side=tk.LEFT, anchor=tk.W, padx=15) main_frame = tk.Frame() run_btn = tk.Button(master=main_frame, text='Run AA_stat', state=tk.DISABLED) status_lbl = tk.Label(master=main_frame, text=get_aa_stat_version()) log_txt = ScrolledText(master=main_frame, state=tk.DISABLED) t = threading.Thread(target=run_aastat, args=(run_btn, status_lbl, log_txt), name='aastat-runner') t.daemon = True run_btn['command'] = partial(start_aastat, t) AAstatHandler = logutils.get_aastat_handler(log_txt) log_t = threading.Thread(target=logutils._socket_listener_worker, args=(logger, logging.handlers.DEFAULT_TCP_LOGGING_PORT, AAstatHandler), name='aastat-listener') log_t.start() logger.debug('AA_stat logging initiated.') log_txt.pack(fill=tk.BOTH, expand=True) run_btn.pack() status_lbl.pack() selected_os_lbl = tk.Label(master=input_frame, text="No files selected", justify='left') get_os_files_btn = tk.Button(master=input_frame, text="Select open search files", command=partial(get_input_filenames, selected_os_lbl, run_btn), width=20) get_os_files_btn.pack(side=tk.LEFT, anchor=tk.E) selected_os_lbl.pack(side=tk.LEFT, padx=15, anchor=tk.W) Hovertip(input_frame, text="Specify open search 
results in pepXML or CSV format.") params_frame = tk.Frame(master=top_frame) params_lbl = tk.Label(master=params_frame, text="Using default parameters.") load_params_btn = tk.Button(master=params_frame, width=10, padx=4, text="Load params", command=partial(get_params, params_lbl)) edit_params_btn = tk.Button(master=params_frame, width=10, padx=4, text="Edit params", command=partial(edit_params, window, params_lbl)) load_params_btn.pack(side=tk.LEFT, fill=tk.X, anchor=tk.E) edit_params_btn.pack(side=tk.LEFT, fill=tk.X, anchor=tk.E) params_lbl.pack(side=tk.LEFT, fill=tk.X, anchor=tk.W, padx=15) input_frame.pack(side=tk.TOP, fill=tk.X, expand=True) spectra_frame.pack(side=tk.TOP, fill=tk.X, expand=True) dir_frame.pack(side=tk.TOP, fill=tk.X, expand=True) params_frame.pack(side=tk.TOP, fill=tk.X, expand=True) top_frame.pack() main_frame.pack(fill=tk.BOTH, expand=True) if not AA_STAT_VERSION: for btn in [get_spectra_btn, get_os_files_btn, get_dir_btn]: btn['state'] = tk.DISABLED window.mainloop() if PARAMS_TMP: logger.debug('Removing temporary file %s', PARAMS_TMP) os.remove(PARAMS_TMP) logutils.tcpserver.abort = 1 logutils.tcpserver.server_close() sys.exit() # needed because there are still working (daemon) threads if __name__ == '__main__': main()
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/gui/gui.py
gui.py
from distutils import dist
import distutils.command.install as dist_install
import os
import sys
import subprocess

from .logging import get_logger


def get_install_script_dir():
    d = dist.Distribution()
    install_cmd = dist_install.install(d)
    install_cmd.finalize_options()
    return install_cmd.install_scripts


def create_shortcut():
    logger = get_logger()
    try:
        from pyshortcuts import make_shortcut
    except ImportError as e:
        logger.debug('Could not import pyshortcuts: %s', e.args[0])
        logger.debug('Trying to pip install pyshortcuts...')
        try:
            subprocess.run([sys.executable, '-m', 'pip', 'install', 'pyshortcuts'], check=True)
            from pyshortcuts import make_shortcut
        except subprocess.CalledProcessError:
            logger.error('Could not install pyshortcuts.')
    try:
        make_shortcut(os.path.join(get_install_script_dir(), 'AA_stat_GUI'),
                      name='AA_stat', description='AA_stat GUI', terminal=False)
    except Exception as e:
        logger.error(f'Could not create shortcut. Got a {type(e)}: {e.args[0]}')
    else:
        logger.info('Desktop shortcut created.')
AA-stat
/AA_stat-2.5.5-py3-none-any.whl/AA_stat/gui/shortcut.py
shortcut.py
import setuptools

setuptools.setup(
    name='AAEEGGOOSS',
    version='0.3',
    author='AegosCOOOD',
    description='Gamaa',
    packages=setuptools.find_packages(),
    python_requires='>=3.8',
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License"
    ]
)
AAEEGGOOSS
/AAEEGGOOSS-0.3.tar.gz/AAEEGGOOSS-0.3/setup.py
setup.py
import os  # note: imported but never used in this module


class a:
    def ae(m, b):
        # Defined without an explicit ``self``; as written it is only usable
        # as a direct call on the class, e.g. a.ae(1, 2) -> {'Aegos': 3}.
        return {'Aegos': m + b}
AAEEGGOOSS
/AAEEGGOOSS-0.3.tar.gz/AAEEGGOOSS-0.3/code/__init__.py
__init__.py
### AAFTF - Automatic Assembly For The Fungi *Jason Stajich and Jon Palmer* ![AAFTF logo](docs/AAFTF.png) Requirements =================== - BBTools - https://jgi.doe.gov/data-and-tools/bbtools/ - Trimmomatic - http://www.usadellab.org/cms/?page=trimmomatic (Optional) - bowtie2 - http://bowtie-bio.sourceforge.net/bowtie2/index.shtml (Optional) - bwa - https://github.com/lh3/bwa - Pilon - https://github.com/broadinstitute/pilon/wiki - sourmash (>=v3.5)- https://sourmash.readthedocs.io/ (install via conda/pip) - NCBI BLAST+ - ftp://ftp.ncbi.nlm.nih.gov/blast/executables/LATEST - minimap2 - https://github.com/lh3/minimap2 Assemblers - SPAdes - http://cab.spbu.ru/software/spades/ - megahit - https://github.com/voutcn/megahit - dipspades - (SPAdes 3.11.1 - note it is not part of later SPAdes packages) http://cab.spbu.ru/files/release3.11.1/dipspades_manual.html - NOVOplasty - https://github.com/ndierckx/NOVOPlasty Authors ============ * Jason Stajich [@hyphaltip](https://github.com/hyphaltip) - http://lab.stajich.org * Jon Palmer [@nextgenusfs](https://github.com/nextgenusfs) - https://twitter.com/jonpalmer2013 Install =========== We are working on simplifying the install, ie getting on Pypi and bioconda. Currently you could create conda environment and install like this: ``` conda create -n aaftf -c bioconda "python>=3.6" bbmap trimmomatic bowtie2 bwa pilon sourmash \ blast minimap2 spades megahit novoplasty biopython fastp ``` And then install this repo with git/pip: ``` $ conda activate aaftf $ python -m pip install git+https://github.com/stajichlab/AAFTF.git ``` Notes =========== This is partially a python re-write of [JAAWS](https://github.com/nextgenusfs/jaaws) which was a unix shell based cleanup and assembly tool written by Jon. Steps / Procedures ================== 1. trim Trim FASTQ input reads - with BBMap 2. mito De novo assemble mitochondrial genome 3. filter Filter contaminanting reads - with BBMap 4. assemble Assemble reads - with SPAdes 5. vecscreen Vector and Contaminant Screening of assembled contigs - with BlastN based method to replicate NCBI screening 6. sourpurge Purge contigs based on sourmash results - with sourmash 7. rmdup Remove duplicate contigs - using minimap2 to find duplicates 8. pilon Polish contig sequences with Pilon - uses Pilon 9. sort Sort contigs by length and rename FASTA headers 10. assess Assess completeness of genome assembly 11. pipeline Run AAFTF pipeline all in one go. # Typical runs ## Trimming and Filtering Trimming options spelled out: ``` usage: AAFTF trim [-h] [-q] [-o BASENAME] [-c cpus] [-ml MINLEN] -l LEFT [-r RIGHT] [-v] [--pipe] [--method {bbduk,trimmomatic}] [-m MEMORY] [--trimmomatic trimmomatic_jar] [--trimmomatic_adaptors TRIMMOMATIC_ADAPTORS] [--trimmomatic_clip TRIMMOMATIC_CLIP] [--trimmomatic_leadingwindow TRIMMOMATIC_LEADINGWINDOW] [--trimmomatic_trailingwindow TRIMMOMATIC_TRAILINGWINDOW] [--trimmomatic_slidingwindow TRIMMOMATIC_SLIDINGWINDOW] [--trimmomatic_quality TRIMMOMATIC_QUALITY] This command trims reads in FASTQ format to remove low quality reads and trim adaptor sequences optional arguments: -h, --help show this help message and exit -q, --quiet Do not output warnings to stderr -o BASENAME, --out BASENAME Output basename, default to base name of --left reads -c cpus, --cpus cpus Number of CPUs/threads to use. -ml MINLEN, --minlen MINLEN Minimum read length after trimming, default: 75 -l LEFT, --left LEFT left/forward reads of paired-end FASTQ or single-end FASTQ. 
-r RIGHT, --right RIGHT right/reverse reads of paired-end FASTQ. -v, --debug Provide debugging messages --pipe AAFTF is running in pipeline mode --method {bbduk,trimmomatic} Program to use for adapter trimming -m MEMORY, --memory MEMORY Max Memory (in GB) --trimmomatic trimmomatic_jar, --jar trimmomatic_jar Trimmomatic JAR path Trimmomatic options: Trimmomatic trimming options --trimmomatic_adaptors TRIMMOMATIC_ADAPTORS Trimmomatic adaptor file, default: TruSeq3-PE.fa --trimmomatic_clip TRIMMOMATIC_CLIP Trimmomatic clipping, default: ILLUMINACLIP:TruSeq3-PE.fa:2:30:10 --trimmomatic_leadingwindow TRIMMOMATIC_LEADINGWINDOW Trimmomatic window processing arguments, default: LEADING:3 --trimmomatic_trailingwindow TRIMMOMATIC_TRAILINGWINDOW Trimmomatic window processing arguments, default: TRAILING:3 --trimmomatic_slidingwindow TRIMMOMATIC_SLIDINGWINDOW Trimmomatic window processing arguments, default: SLIDINGWINDOW:4:15 --trimmomatic_quality TRIMMOMATIC_QUALITY Trimmomatic quality encoding -phred33 or phred64 ``` Example usage: ``` MEM=128 # 128gb BASE=STRAINX READSDIR=reads TRIMREAD=reads_trimmed CPU=8 AAFTF trim --method bbduk --memory $MEM -c $CPU \ --left $READSDIR/${BASE}_R1.fq.gz --right $READSDIR/${BASE}_R2.fq.gz \ -o $TRIMREAD/${BASE} # this step make take a lot of memory depending on how many filtering libraries you use AAFTF filter -c $CPU --memory $MEM --aligner bbduk \ -o $TRIMREAD/${BASE} --left $TRIMREAD/${BASE}_1P.fastq.gz --right $TRIMREAD/${BASE}_2P.fastq.gz ``` ## Assembly The specified assembler can be made through the `--method` option. The full set of options are below. ``` usage: AAFTF assemble [-h] [-q] [--method METHOD] -o OUT [-w WORKDIR] [-c cpus] [-m MEMORY] [-l LEFT] [-r RIGHT] [-v] [--tmpdir TMPDIR] [--assembler_args ASSEMBLER_ARGS] [--haplocontigs] [--pipe] Run assembler on cleaned reads optional arguments: -h, --help show this help message and exit -q, --quiet Do not output warnings to stderr --method METHOD Assembly method: spades, dipspades, megahit -o OUT, --out OUT Output assembly FASTA -w WORKDIR, --workdir WORKDIR assembly output directory -c cpus, --cpus cpus Number of CPUs/threads to use. -m MEMORY, --memory MEMORY Memory (in GB) setting for SPAdes. Default is 32 -l LEFT, --left LEFT Left (Forward) reads -r RIGHT, --right RIGHT Right (Reverse) reads -v, --debug Print Spades stdout to terminal --tmpdir TMPDIR Assembler temporary dir --assembler_args ASSEMBLER_ARGS Additional SPAdes/Megahit arguments --haplocontigs For dipSPAdes take the haplocontigs file --pipe AAFTF is running in pipeline mode ``` ``` CPU=24 MEM=96 LEFT=$TRIMREAD/${BASE}_filtered_1.fastq.gz RIGHT=$TRIMREAD/${BASE}_filtered_2.fastq.gz WORKDIR=working_AAFTF OUTDIR=genomes ASMFILE=$OUTDIR/${BASE}.spades.fasta mkdir -p $WORKDIR $OUTDIR AAFTF assemble -c $CPU --mem $MEM \ --left $LEFT --right $RIGHT \ -o $ASMFILE -w $WORKDIR/spades_$BASE ``` ## vectrim ``` CPU=16 MEM=16 LEFT=$TRIMREAD/${BASE}_filtered_1.fastq.gz RIGHT=$TRIMREAD/${BASE}_filtered_2.fastq.gz WORKDIR=working_AAFTF OUTDIR=genomes ASMFILE=$OUTDIR/${BASE}.spades.fasta VECTRIM=$OUTDIR/${BASE}.vecscreen.fasta mkdir -p $WORKDIR $OUTDIR AAFTF vecscreen -c $CPU -i $ASMFILE -o $VECTRIM ```
AAFTF
/AAFTF-0.4.1.tar.gz/AAFTF-0.4.1/README.md
README.md
"""AAFTF setup config script.""" import os from setuptools import setup from AAFTF.__version__ import __version__ version = __version__ long_description = """ ``AAFTF`` Automatic Assembly For The Fungi is a toolkit for automated genome assembly, cleanup, mitochondrial genome asm, and polishing' """ HERE = os.path.dirname(__file__) with open(os.path.join(HERE, "requirements.txt")) as f: install_requires = [x.strip() for x in f.readlines()] setup( name="AAFTF", version=version, install_requires=install_requires, requires=['python (>=3.6.0)'], packages=['AAFTF', 'scripts'], author="Jason Stajich, Jonathan Palmer", description='Automated genome assembly, cleanup, and polishing', long_description=long_description, url="http://github.com/stajichlab/AAFTF", package_dir={'AAFTF': "AAFTF"}, package_data={'AAFTF': ['test']}, zip_safe=False, include_package_data=True, # scripts=['AAFTF/scripts/AAFTF'], entry_points={ 'console_scripts': [ 'AAFTF=AAFTF.AAFTF_main:main', ], }, author_email="jasonstajich.phd@gmail.com", classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Topic :: Scientific/Engineering :: Bio-Informatics' ] )
AAFTF
/AAFTF-0.4.1.tar.gz/AAFTF-0.4.1/setup.py
setup.py
"""Init placeholder."""
AAFTF
/AAFTF-0.4.1.tar.gz/AAFTF-0.4.1/scripts/__init__.py
__init__.py
from setuptools import setup, find_packages setup(name="AASMessenger_Client", version="1.0.1", description="Messenger_Client", author="Anton Sobolev", author_email="antony.sobolev@gmail.com", packages=find_packages(), install_requires=['PyQt5', 'sqlalchemy', 'pycryptodome'] )
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/setup.py
setup.py
"""Клиентская часть приложения""" import argparse import os import sys import logging from Crypto.PublicKey import RSA from PyQt5.QtWidgets import QApplication, QMessageBox from common.settings import DEFAULT_IP_ADDRESS, DEFAULT_PORT from common.decos import log from common.errors import ServerError from client.database import ClientDatabase from client.transport import ClientTransport from client.main_window import ClientMainWindow from client.start_dialog import UserNameDialog # Инициализация клиентского логера: CLIENT_LOGGER = logging.getLogger('client') # Парсер аргументов коммандной строки @log def args_handler(): ''' Парсер аргументов командной строки, возвращает кортеж из 4 элементов адрес сервера, порт, имя пользователя, пароль. Выполняет проверку на корректность номера порта. ''' parser = argparse.ArgumentParser() parser.add_argument('addr', default=DEFAULT_IP_ADDRESS, nargs='?') parser.add_argument('port', type=int, default=DEFAULT_PORT, nargs='?') parser.add_argument('-n', '--name', default=None, nargs='?') parser.add_argument('-p', '--password', default='', nargs='?') args = parser.parse_args() server_address = args.addr server_port = args.port client_name = args.name client_passwd = args.password # Проверим подходит ли номер порта: if not 1023 < server_port < 65535: CLIENT_LOGGER.critical(f'Попытка запуска с неподходящим номером ' f'порта: {server_port}. Номер порта должен ' f'находиться в диапозоне от 1024 до 65535') exit(1) return server_address, server_port, client_name, client_passwd # Основная функция клиента if __name__ == '__main__': print('Консольный мессенджер. Клиентский модуль') # Загружаем параметы коммандной строки: server_address, server_port, client_name, client_passwd = args_handler() # Создаём клиентокое приложение client_app = QApplication(sys.argv) # Если имя пользователя не было указано в командной строке, # то запросим его: start_dialog = UserNameDialog() if not client_name or not client_passwd: client_app.exec_() # Если пользователь ввёл имя и нажал ОК, то сохраняем ведённое и # удаляем объект, иначе выходим: if start_dialog.ok_pressed: client_name = start_dialog.client_name.text() client_passwd = start_dialog.client_passwd.text() else: exit(0) CLIENT_LOGGER.info(f'Клиент запущен с параметрами: ' f'IP сервера: {server_address}, ' f'порт сервера: {server_port}, ' f'имя пользователя {client_name}') # Загружаем ключи с файла, если же файла нет, то генерируем новую пару: # dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path = os.getcwd() key_file = os.path.join(dir_path, f'{client_name}.key') if not os.path.exists(key_file): keys = RSA.generate(2048, os.urandom) with open(key_file, 'wb') as key: key.write(keys.export_key()) else: with open(key_file, 'rb') as key: keys = RSA.import_key(key.read()) keys.publickey().export_key() # Создаём объект базы данных database = ClientDatabase(client_name) # Создаём объект - транспорт и запускаем транспортный поток: try: transport = ClientTransport( server_port, server_address, database, client_name, client_passwd, keys) except ServerError as error: message = QMessageBox() message.critical(start_dialog, 'Ошибка сервера', error.text) exit(1) transport.setDaemon(True) transport.start() # Удалим объект диалога за ненадобностью del start_dialog # Создаём GUI main_window = ClientMainWindow(database, transport, keys) main_window.make_connection(transport) main_window.setWindowTitle(f'Messenger - alpha release - {client_name}') client_app.exec_() # Раз графическая оболочка закрылась, закрываем транспорт 
transport.transport_shutdown() transport.join()
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client.py
client.py
import sys
from cx_Freeze import setup, Executable

build_exe_options = {
    "packages": ["common", "logs", "client", "unit_test"],
}
setup(
    name="messenger_client",
    version="1.0.01",
    description="messenger_client",
    # cx_Freeze reads build_exe settings from the "options" keyword
    options={
        "build_exe": build_exe_options
    },
    executables=[Executable('client.py',
                            base='Win32GUI',
                            targetName='client.exe')]
)
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/setup_client.py
setup_client.py
"""Настройки журналирования клиентской части приложения""" import sys import os import logging from common.settings import LOGGING_LEVEL sys.path.append('../') # Создаём формировщик логов (formatter): CLIENT_FORMATTER = logging.Formatter( "%(asctime)s %(levelname)s %(module)s %(message)s") # Подготовка имени файла для логирования: # PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.getcwd() PATH = os.path.join(PATH, 'client.log') # Создаём потоки вывода логов: STREAM_HANDLER = logging.StreamHandler(sys.stderr) STREAM_HANDLER.setFormatter(CLIENT_FORMATTER) STREAM_HANDLER.setLevel(logging.ERROR) FILE_HANDLER = logging.FileHandler(PATH, encoding='utf-8') FILE_HANDLER.setFormatter(CLIENT_FORMATTER) # Создаём регистратор и настраиваем его LOGGER = logging.getLogger('client') LOGGER.addHandler(STREAM_HANDLER) LOGGER.addHandler(FILE_HANDLER) LOGGER.setLevel(LOGGING_LEVEL) # Отладка if __name__ == '__main__': LOGGER.critical('critical - тестовый запуск') LOGGER.error('error - тестовый запуск') LOGGER.warning('warning - тестовый запуск') LOGGER.info('info - тестовый запуск') LOGGER.debug('debug - тестовый запуск')
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/log/client_log_config.py
client_log_config.py
"""Настройки журналирования серверной части приложения""" import sys import os import logging.handlers from common.settings import LOGGING_LEVEL sys.path.append('../') # Создаём формировщик логов (formatter): SERVER_FORMATTER = logging.Formatter("%(asctime)s %(levelname)s " "%(module)s %(message)s") # Подготовка имени файла для логирования: # PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.getcwd() PATH = os.path.join(PATH, 'server.log') # Создаём потоки вывода логов: STREAM_HANDLER = logging.StreamHandler(sys.stderr) STREAM_HANDLER.setFormatter(SERVER_FORMATTER) STREAM_HANDLER.setLevel(logging.ERROR) FILE_HANDLER = logging.handlers.TimedRotatingFileHandler(PATH, interval=1, when='d', encoding='utf-8') FILE_HANDLER.setFormatter(SERVER_FORMATTER) # Создаём регистратор и настраиваем его: LOGGER = logging.getLogger('server') LOGGER.addHandler(STREAM_HANDLER) LOGGER.addHandler(FILE_HANDLER) LOGGER.setLevel(LOGGING_LEVEL) # Отладка: if __name__ == '__main__': LOGGER.critical('critical тестовый запуск') LOGGER.error('error тестовый запуск') LOGGER.warning('warning тестовый запуск') LOGGER.info('info тестовый запуск') LOGGER.debug('debug тестовый запуск')
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/log/server_log_config.py
server_log_config.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys # sys.path.insert(0, os.path.abspath('.')) path = os.path.dirname(os.path.abspath('../../../../launcher.py')) sys.path.insert(0, path) # -- Project information ----------------------------------------------------- project = 'Messenger' copyright = '2020, Sobolev Anton' author = 'Sobolev Anton' # The full version, including alpha/beta/rc tags release = '1.0.0' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static']
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/doc/source/conf.py
conf.py
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, \ QLabel , qApp class UserNameDialog(QDialog): ''' Класс реализующий стартовый диалог с запросом логина и пароля пользователя. ''' def __init__(self): super().__init__() self.ok_pressed = False self.setWindowTitle('Привет!') self.setFixedSize(175, 135) self.label = QLabel('Введите имя пользователя:', self) self.label.move(10, 10) self.label.setFixedSize(150, 10) self.client_name = QLineEdit(self) self.client_name.setFixedSize(154, 20) self.client_name.move(10, 30) self.btn_ok = QPushButton('Начать', self) self.btn_ok.move(10, 105) self.btn_ok.clicked.connect(self.click) self.btn_cancel = QPushButton('Выход', self) self.btn_cancel.move(90, 105) self.btn_cancel.clicked.connect(qApp.exit) self.label_passwd = QLabel('Введите пароль:', self) self.label_passwd.move(10, 55) self.label_passwd.setFixedSize(150, 15) self.client_passwd = QLineEdit(self) self.client_passwd.setFixedSize(154, 20) self.client_passwd.move(10, 75) self.client_passwd.setEchoMode(QLineEdit.Password) self.show() def click(self): '''Метод обрабтчик кнопки ОК.''' if self.client_name.text() and self.client_passwd.text(): self.ok_pressed = True qApp.exit() if __name__ == '__main__': app = QApplication([]) dial = UserNameDialog() app.exec_()
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/start_dialog.py
start_dialog.py
import logging from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton from PyQt5.QtCore import Qt # Инициализация клиентского логера: CLIENT_LOGGER = logging.getLogger('client') # Диалог выбора контакта для добавления: class AddContactDialog(QDialog): ''' Диалог добавления пользователя в список контактов. Предлагает пользователю список возможных контактов и добавляет выбранный в контакты. ''' def __init__(self, transport, database): super().__init__() self.transport = transport self.database = database self.setFixedSize(350, 120) self.setWindowTitle('Выберите контакт для добавления:') self.setAttribute(Qt.WA_DeleteOnClose) self.setModal(True) self.selector_label = QLabel('Выберите контакт для добавления:', self) self.selector_label.setFixedSize(200, 20) self.selector_label.move(10, 0) self.selector = QComboBox(self) self.selector.setFixedSize(200, 20) self.selector.move(10, 30) self.btn_refresh = QPushButton('Обновить список', self) self.btn_refresh.setFixedSize(100, 30) self.btn_refresh.move(60, 60) self.btn_ok = QPushButton('Добавить', self) self.btn_ok.setFixedSize(100, 30) self.btn_ok.move(230, 20) self.btn_cancel = QPushButton('Отмена', self) self.btn_cancel.setFixedSize(100, 30) self.btn_cancel.move(230, 60) self.btn_cancel.clicked.connect(self.close) # Заполняем список возможных контактов: self.possible_contacts_update() # Назначаем действие на кнопку обновить: self.btn_refresh.clicked.connect(self.update_possible_contacts) def possible_contacts_update(self): ''' Метод заполнения списка возможных контактов. Создаёт список всех зарегистрированных пользователей за исключением уже добавленных в контакты и самого себя. ''' self.selector.clear() # Множества всех контактов и контактов клиента: contacts_list = set(self.database.get_contacts()) users_list = set(self.database.get_users()) # Удалим сами себя из списка пользователей, # чтобы нельзя было добавить самого себя: users_list.remove(self.transport.username) # Добавляем список возможных контактов: self.selector.addItems(users_list - contacts_list) def update_possible_contacts(self): ''' Метод обновления списка возможных контактов. Запрашивает с сервера список известных пользователей и обносляет содержимое окна. ''' try: self.transport.user_list_update() except OSError: pass else: CLIENT_LOGGER.debug('Обновление списка пользователей с сервера ' 'выполнено') self.possible_contacts_update()
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/add_contact.py
add_contact.py
import socket import time import logging import json import threading import hashlib import hmac import binascii from PyQt5.QtCore import pyqtSignal, QObject from common.utils import send_message, recv_message from common.settings import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, \ PUBLIC_KEY, RESPONSE, ERROR, DATA, RESPONSE_511, MESSAGE_TEXT, \ DESTINATION, SENDER, MESSAGE, GET_CONTACTS, LIST_INFO, USERS_REQUEST, \ PUBLIC_KEY_REQUEST, ADD_CONTACT, REMOVE_CONTACT, EXIT from common.errors import ServerError # Логер и объект блокировки для работы с сокетом: logger = logging.getLogger('client') socket_lock = threading.Lock() class ClientTransport(threading.Thread, QObject): ''' Класс реализующий транспортную подсистему клиентского модуля. Отвечает за взаимодействие с сервером. ''' # Сигналы новое сообщение и потеря соединения: new_message = pyqtSignal(dict) message_205 = pyqtSignal() connection_lost = pyqtSignal() def __init__(self, port, ip_address, database, username, passwd, keys): # Вызываем конструктор предка: threading.Thread.__init__(self) QObject.__init__(self) # Класс База данных - работа с базой: self.database = database # Имя пользователя: self.username = username # Пароль: self.password = passwd # Сокет для работы с сервером: self.transport = None # Набор ключей для шифрования: self.keys = keys # Устанавливаем соединение: self.connection_init(port, ip_address) # Обновляем таблицы известных пользователей и контактов: try: self.user_list_update() self.contacts_list_update() except OSError as err: if err.errno: logger.critical(f'Потеряно соединение с сервером.') raise ServerError('Потеряно соединение с сервером!') logger.error( 'Timeout соединения при обновлении списков пользователей.') except json.JSONDecodeError: logger.critical(f'Потеряно соединение с сервером.') raise ServerError('Потеряно соединение с сервером!') # Флаг продолжения работы транспорта: self.running = True # Функция инициализации соединения с сервером: def connection_init(self, port, ip): # Инициализация сокета и сообщение серверу о нашем появлении: self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Таймаут необходим для освобождения сокета: self.transport.settimeout(5) # Соединяемся, 5 попыток соединения, # флаг успеха ставим в True если удалось: connected = False for i in range(5): logger.info(f'Попытка подключения №{i + 1}') try: self.transport.connect((ip, port)) except (OSError, ConnectionRefusedError): pass else: connected = True break time.sleep(1) # Если соединится не удалось - исключение if not connected: logger.critical('Не удалось установить соединение с сервером') raise ServerError('Не удалось установить соединение с сервером') logger.debug('Установлено соединение с сервером') # Запускаем процедуру авторизации. 
# Получаем хэш пароля: passwd_bytes = self.password.encode('utf-8') salt = self.username.lower().encode('utf-8') passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000) passwd_hash_string = binascii.hexlify(passwd_hash) # Получаем публичный ключ и декодируем его из байтов: pubkey = self.keys.publickey().export_key().decode('ascii') # Авторизируемся на сервере: with socket_lock: presense = { ACTION: PRESENCE, TIME: time.time(), USER: { ACCOUNT_NAME: self.username, PUBLIC_KEY: pubkey } } # Отправляем серверу приветственное сообщение: try: send_message(self.transport, presense) ans = recv_message(self.transport) # Если сервер вернул ошибку, бросаем исключение: if RESPONSE in ans: if ans[RESPONSE] == 400: raise ServerError(ans[ERROR]) elif ans[RESPONSE] == 511: # Если всё нормально, то продолжаем процедуру # авторизации: ans_data = ans[DATA] hash = hmac.new( passwd_hash_string, ans_data.encode('utf-8')) digest = hash.digest() my_ans = RESPONSE_511 my_ans[DATA] = binascii.b2a_base64( digest).decode('ascii') send_message(self.transport, my_ans) self.process_server_ans(recv_message(self.transport)) except (OSError, json.JSONDecodeError): raise ServerError('Сбой соединения в процессе авторизации.') def process_server_ans(self, message): '''Метод обработчик поступающих сообщений с сервера.''' logger.debug(f'Разбор сообщения от сервера: {message}') # Если это подтверждение чего-либо: if RESPONSE in message: if message[RESPONSE] == 200: return elif message[RESPONSE] == 400: raise ServerError(f'{message[ERROR]}') elif message[RESPONSE] == 205: self.user_list_update() self.contacts_list_update() self.message_205.emit() else: logger.error( f'Принят неизвестный код подтверждения {message[RESPONSE]}') # Если это сообщение от пользователя добавляем в базу, # даём сигнал о новом сообщении: elif ACTION in message and message[ACTION] == MESSAGE \ and SENDER in message and DESTINATION in message \ and MESSAGE_TEXT in message \ and message[DESTINATION] == self.username: logger.debug( f'Получено сообщение от пользователя ' f'{message[SENDER]}:{message[MESSAGE_TEXT]}') self.new_message.emit(message) def contacts_list_update(self): '''Метод обновляющий с сервера список контактов.''' self.database.contacts_clear() logger.debug(f'Запрос контакт листа для пользователся {self.name}') req = { ACTION: GET_CONTACTS, TIME: time.time(), USER: self.username } logger.debug(f'Сформирован запрос {req}') with socket_lock: send_message(self.transport, req) ans = recv_message(self.transport) logger.debug(f'Получен ответ {ans}') if RESPONSE in ans and ans[RESPONSE] == 202: for contact in ans[LIST_INFO]: self.database.add_contact(contact) else: logger.error('Не удалось обновить список контактов.') def user_list_update(self): '''Метод обновляющий с сервера список пользователей.''' logger.debug(f'Запрос списка известных пользователей {self.username}') req = { ACTION: USERS_REQUEST, TIME: time.time(), ACCOUNT_NAME: self.username } with socket_lock: send_message(self.transport, req) ans = recv_message(self.transport) if RESPONSE in ans and ans[RESPONSE] == 202: self.database.add_users(ans[LIST_INFO]) else: logger.error('Не удалось обновить список известных пользователей.') def key_request(self, user): '''Метод запрашивающий с сервера публичный ключ пользователя.''' logger.debug(f'Запрос публичного ключа для {user}') req = { ACTION: PUBLIC_KEY_REQUEST, TIME: time.time(), ACCOUNT_NAME: user } with socket_lock: send_message(self.transport, req) ans = recv_message(self.transport) if RESPONSE in ans and ans[RESPONSE] == 511: 
return ans[DATA] else: logger.error(f'Не удалось получить ключ собеседника{user}.') def add_contact(self, contact): '''Метод отправляющий на сервер сведения о добавлении контакта.''' logger.debug(f'Создание контакта {contact}') req = { ACTION: ADD_CONTACT, TIME: time.time(), USER: self.username, ACCOUNT_NAME: contact } with socket_lock: send_message(self.transport, req) self.process_server_ans(recv_message(self.transport)) def remove_contact(self, contact): '''Метод отправляющий на сервер сведения о удалении контакта.''' logger.debug(f'Удаление контакта {contact}') req = { ACTION: REMOVE_CONTACT, TIME: time.time(), USER: self.username, ACCOUNT_NAME: contact } with socket_lock: send_message(self.transport, req) self.process_server_ans(recv_message(self.transport)) def transport_shutdown(self): '''Метод уведомляющий сервер о завершении работы клиента.''' self.running = False message = { ACTION: EXIT, TIME: time.time(), ACCOUNT_NAME: self.username } with socket_lock: try: send_message(self.transport, message) except OSError: pass logger.debug('Транспорт завершает работу.') time.sleep(0.5) def send_message(self, to, message): '''Метод отправляющий на сервер сообщения для пользователя.''' message_dict = { ACTION: MESSAGE, SENDER: self.username, DESTINATION: to, TIME: time.time(), MESSAGE_TEXT: message } logger.debug(f'Сформирован словарь сообщения: {message_dict}') # Необходимо дождаться освобождения сокета для отправки сообщения: with socket_lock: send_message(self.transport, message_dict) self.process_server_ans(recv_message(self.transport)) logger.info(f'Отправлено сообщение для пользователя {to}') def run(self): '''Метод содержащий основной цикл работы транспортного потока.''' logger.debug('Запущен процесс - приёмник собщений с сервера.') while self.running: # Отдыхаем секунду и снова пробуем захватить сокет. # если не сделать тут задержку, то отправка может # достаточно долго ждать освобождения сокета: time.sleep(1) message = None with socket_lock: try: self.transport.settimeout(0.5) message = recv_message(self.transport) except OSError as err: if err.errno: logger.critical(f'Потеряно соединение с сервером.') self.running = False self.connection_lost.emit() # Проблемы с соединением except (ConnectionError, ConnectionAbortedError, ConnectionResetError, json.JSONDecodeError, TypeError): logger.debug(f'Потеряно соединение с сервером.') self.running = False self.connection_lost.emit() finally: self.transport.settimeout(5) # Если сообщение получено, то вызываем функцию обработчик: if message: logger.debug(f'Принято сообщение с сервера: {message}') self.process_server_ans(message)
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/transport.py
transport.py
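The authorisation handshake in `ClientTransport.connection_init` above hashes the password with PBKDF2-HMAC-SHA512 (salted with the lowercased username) and answers the server's 511 challenge with an HMAC digest over the random data it returns. Below is a minimal standalone sketch of that digest calculation; the credentials and challenge are made up, and `digestmod` is spelled out explicitly because the original `hmac.new(...)` call omits it and therefore relies on the MD5 default that only exists on Python versions before 3.8.
```
import binascii
import hashlib
import hmac


def auth_digest(username, password, server_challenge):
    """Client-side digest, mirroring ClientTransport.connection_init."""
    # Hash the password with PBKDF2-HMAC-SHA512, salted with the lowercased username
    passwd_hash = hashlib.pbkdf2_hmac(
        'sha512', password.encode('utf-8'),
        username.lower().encode('utf-8'), 10000)
    passwd_hash_string = binascii.hexlify(passwd_hash)
    # The hex string acts as an HMAC key over the server's random challenge
    # (the DATA field of the RESPONSE 511 message)
    digest = hmac.new(passwd_hash_string,
                      server_challenge.encode('utf-8'), 'MD5').digest()
    # The client replies with the digest base64-encoded in its own 511 message
    return binascii.b2a_base64(digest).decode('ascii')


# Hypothetical credentials and challenge, for illustration only:
print(auth_digest('user1', 'secret', 'random_server_token'))
```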
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton, \ QApplication from PyQt5.QtCore import Qt class DelContactDialog(QDialog): ''' Диалог удаления контакта. Прделагает текущий список контактов, не имеет обработчиков для действий. ''' def __init__(self, database): super().__init__() self.database = database self.setFixedSize(350, 120) self.setWindowTitle('Выберите контакт для удаления:') self.setAttribute(Qt.WA_DeleteOnClose) self.setModal(True) self.selector_label = QLabel('Выберите контакт для удаления:', self) self.selector_label.setFixedSize(200, 20) self.selector_label.move(10, 0) self.selector = QComboBox(self) self.selector.setFixedSize(200, 20) self.selector.move(10, 30) self.btn_ok = QPushButton('Удалить', self) self.btn_ok.setFixedSize(100, 30) self.btn_ok.move(230, 20) self.btn_cancel = QPushButton('Отмена', self) self.btn_cancel.setFixedSize(100, 30) self.btn_cancel.move(230, 60) self.btn_cancel.clicked.connect(self.close) # Заполнитель контактов для удаления: self.selector.addItems(sorted(self.database.get_contacts())) if __name__ == '__main__': app = QApplication([]) window = DelContactDialog(None) window.show() app.exec_()
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/del_contact.py
del_contact.py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'client.ui' # # Created by: PyQt5 UI code generator 5.11.3 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainClientWindow(object): def setupUi(self, MainClientWindow): MainClientWindow.setObjectName("MainClientWindow") MainClientWindow.resize(756, 534) MainClientWindow.setMinimumSize(QtCore.QSize(756, 534)) self.centralwidget = QtWidgets.QWidget(MainClientWindow) self.centralwidget.setObjectName("centralwidget") self.label_contacts = QtWidgets.QLabel(self.centralwidget) self.label_contacts.setGeometry(QtCore.QRect(10, 0, 101, 16)) self.label_contacts.setObjectName("label_contacts") self.btn_add_contact = QtWidgets.QPushButton(self.centralwidget) self.btn_add_contact.setGeometry(QtCore.QRect(10, 450, 121, 31)) self.btn_add_contact.setObjectName("btn_add_contact") self.btn_remove_contact = QtWidgets.QPushButton(self.centralwidget) self.btn_remove_contact.setGeometry(QtCore.QRect(140, 450, 121, 31)) self.btn_remove_contact.setObjectName("btn_remove_contact") self.label_history = QtWidgets.QLabel(self.centralwidget) self.label_history.setGeometry(QtCore.QRect(300, 0, 391, 21)) self.label_history.setObjectName("label_history") self.text_message = QtWidgets.QTextEdit(self.centralwidget) self.text_message.setGeometry(QtCore.QRect(300, 360, 441, 71)) self.text_message.setObjectName("text_message") self.label_new_message = QtWidgets.QLabel(self.centralwidget) self.label_new_message.setGeometry(QtCore.QRect(300, 330, 450, 16)) # Правка тут self.label_new_message.setObjectName("label_new_message") self.list_contacts = QtWidgets.QListView(self.centralwidget) self.list_contacts.setGeometry(QtCore.QRect(10, 20, 251, 411)) self.list_contacts.setObjectName("list_contacts") self.list_messages = QtWidgets.QListView(self.centralwidget) self.list_messages.setGeometry(QtCore.QRect(300, 20, 441, 301)) self.list_messages.setObjectName("list_messages") self.btn_send = QtWidgets.QPushButton(self.centralwidget) self.btn_send.setGeometry(QtCore.QRect(610, 450, 131, 31)) self.btn_send.setObjectName("btn_send") self.btn_clear = QtWidgets.QPushButton(self.centralwidget) self.btn_clear.setGeometry(QtCore.QRect(460, 450, 131, 31)) self.btn_clear.setObjectName("btn_clear") MainClientWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainClientWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 756, 21)) self.menubar.setObjectName("menubar") self.menu = QtWidgets.QMenu(self.menubar) self.menu.setObjectName("menu") self.menu_2 = QtWidgets.QMenu(self.menubar) self.menu_2.setObjectName("menu_2") MainClientWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainClientWindow) self.statusBar.setObjectName("statusBar") MainClientWindow.setStatusBar(self.statusBar) self.menu_exit = QtWidgets.QAction(MainClientWindow) self.menu_exit.setObjectName("menu_exit") self.menu_add_contact = QtWidgets.QAction(MainClientWindow) self.menu_add_contact.setObjectName("menu_add_contact") self.menu_del_contact = QtWidgets.QAction(MainClientWindow) self.menu_del_contact.setObjectName("menu_del_contact") self.menu.addAction(self.menu_exit) self.menu_2.addAction(self.menu_add_contact) self.menu_2.addAction(self.menu_del_contact) self.menu_2.addSeparator() self.menubar.addAction(self.menu.menuAction()) self.menubar.addAction(self.menu_2.menuAction()) self.retranslateUi(MainClientWindow) self.btn_clear.clicked.connect(self.text_message.clear) 
QtCore.QMetaObject.connectSlotsByName(MainClientWindow) def retranslateUi(self, MainClientWindow): _translate = QtCore.QCoreApplication.translate MainClientWindow.setWindowTitle(_translate("MainClientWindow", "Чат Программа alpha release")) self.label_contacts.setText(_translate("MainClientWindow", "Список контактов:")) self.btn_add_contact.setText(_translate("MainClientWindow", "Добавить контакт")) self.btn_remove_contact.setText(_translate("MainClientWindow", "Удалить контакт")) self.label_history.setText(_translate("MainClientWindow", "История сообщений:")) self.label_new_message.setText(_translate("MainClientWindow", "Введите новое сообщение:")) self.btn_send.setText(_translate("MainClientWindow", "Отправить сообщение")) self.btn_clear.setText(_translate("MainClientWindow", "Очистить поле")) self.menu.setTitle(_translate("MainClientWindow", "Файл")) self.menu_2.setTitle(_translate("MainClientWindow", "Контакты")) self.menu_exit.setText(_translate("MainClientWindow", "Выход")) self.menu_add_contact.setText(_translate("MainClientWindow", "Добавить контакт")) self.menu_del_contact.setText(_translate("MainClientWindow", "Удалить контакт"))
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/main_window_conv.py
main_window_conv.py
import json import logging import base64 from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor from PyQt5.QtCore import pyqtSlot, Qt from Crypto.Cipher import PKCS1_OAEP from Crypto.PublicKey import RSA from client.main_window_conv import Ui_MainClientWindow from client.add_contact import AddContactDialog from client.del_contact import DelContactDialog from common.errors import ServerError from common.settings import MESSAGE_TEXT, SENDER # Инициализация клиентского логера: logger = logging.getLogger('client') class ClientMainWindow(QMainWindow): ''' Класс - основное окно пользователя. Содержит всю основную логику работы клиентского модуля. Конфигурация окна создана в QTDesigner и загружается из конвертированого файла main_window_conv.py ''' def __init__(self, database, transport, keys): super().__init__() # Основные переменные: self.database = database self.transport = transport # Объект - дешифорвщик сообщений с предзагруженным ключём: self.decrypter = PKCS1_OAEP.new(keys) # Загружаем конфигурацию окна из дизайнера: self.ui = Ui_MainClientWindow() self.ui.setupUi(self) # Кнопка "Выход": self.ui.menu_exit.triggered.connect(qApp.exit) # Кнопка отправить сообщение: self.ui.btn_send.clicked.connect(self.send_message) # Кнопка "Добавить контакт": self.ui.btn_add_contact.clicked.connect(self.add_contact_window) self.ui.menu_add_contact.triggered.connect(self.add_contact_window) # Удалить контакт: self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window) self.ui.menu_del_contact.triggered.connect(self.delete_contact_window) # Дополнительные требующиеся атрибуты: self.contacts_model = None self.history_model = None self.messages = QMessageBox() self.current_chat = None self.current_chat_key = None self.encryptor = None self.ui.list_messages.setHorizontalScrollBarPolicy( Qt.ScrollBarAlwaysOff) self.ui.list_messages.setWordWrap(True) # Даблклик по листу контактов отправляется в обработчик: self.ui.list_contacts.doubleClicked.connect(self.select_active_user) self.clients_list_update() self.set_disabled_input() self.show() def set_disabled_input(self): ''' Метод делающий поля ввода неактивными''' # Надпись - получатель: self.ui.label_new_message.setText( 'Для выбора получателя дважды кликните на нем в окне контактов.') self.ui.text_message.clear() if self.history_model: self.history_model.clear() # Поле ввода и кнопка отправки неактивны до выбора получателя: self.ui.btn_clear.setDisabled(True) self.ui.btn_send.setDisabled(True) self.ui.text_message.setDisabled(True) self.encryptor = None self.current_chat = None self.current_chat_key = None def history_list_update(self): ''' Метод заполняющий соответствующий QListView историей переписки с текущим собеседником. ''' # Получаем историю сортированную по дате: list = sorted( self.database.get_history( self.current_chat), key=lambda item: item[3]) # Если модель не создана, создадим: if not self.history_model: self.history_model = QStandardItemModel() self.ui.list_messages.setModel(self.history_model) # Очистим от старых записей: self.history_model.clear() # Берём не более 20 последних записей: length = len(list) start_index = 0 if length > 20: start_index = length - 20 # Заполнение модели записями, так-же стоит разделить входящие # и исходящие выравниванием и разным фоном. 
# Записи в обратном порядке, поэтому выбираем их с конца # и не более 20: for i in range(start_index, length): item = list[i] if item[1] == 'in': mess = QStandardItem( f'Входящее от {item[3].replace(microsecond=0)}:\n ' f'{item[2]}') mess.setEditable(False) mess.setBackground(QBrush(QColor(255, 213, 213))) mess.setTextAlignment(Qt.AlignLeft) self.history_model.appendRow(mess) else: mess = QStandardItem( f'Исходящее от {item[3].replace(microsecond=0)}:\n ' f'{item[2]}') mess.setEditable(False) mess.setTextAlignment(Qt.AlignRight) mess.setBackground(QBrush(QColor(204, 255, 204))) self.history_model.appendRow(mess) self.ui.list_messages.scrollToBottom() def select_active_user(self): '''Метод обработчик события двойного клика по списку контактов.''' # Выбранный пользователем (даблклик) находится # в выделеном элементе в QListView self.current_chat = self.ui.list_contacts.currentIndex().data() # Вызываем основную функцию: self.set_active_user() def set_active_user(self): '''Метод активации чата с собеседником.''' # Запрашиваем публичный ключ пользователя # и создаём объект шифрования: try: self.current_chat_key = self.transport.key_request( self.current_chat) logger.debug(f'Загружен открытый ключ для {self.current_chat}') if self.current_chat_key: self.encryptor = PKCS1_OAEP.new( RSA.import_key(self.current_chat_key)) except (OSError , json.JSONDecodeError): self.current_chat_key = None self.encryptor = None logger.debug(f'Не удалось получить ключ для {self.current_chat}') # Если ключа нет то ошибка, что не удалось начать чат с пользователем: if not self.current_chat_key: self.messages.warning( self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.') return # Ставим надпись и активируем кнопки: self.ui.label_new_message.setText( f'Введите сообщенние для {self.current_chat}:') self.ui.btn_clear.setDisabled(False) self.ui.btn_send.setDisabled(False) self.ui.text_message.setDisabled(False) # Заполняем окно историю сообщений по требуемому пользователю: self.history_list_update() def clients_list_update(self): '''Метод обновляющий список контактов.''' contacts_list = self.database.get_contacts() self.contacts_model = QStandardItemModel() for i in sorted(contacts_list): item = QStandardItem(i) item.setEditable(False) self.contacts_model.appendRow(item) self.ui.list_contacts.setModel(self.contacts_model) def add_contact_window(self): '''Метод создающий окно - диалог добавления контакта''' global select_dialog select_dialog = AddContactDialog(self.transport, self.database) select_dialog.btn_ok.clicked.connect( lambda: self.add_contact_action(select_dialog)) select_dialog.show() def add_contact_action(self, item): '''Метод обработчк нажатия кнопки "Добавить"''' new_contact = item.selector.currentText() self.add_contact(new_contact) item.close() def add_contact(self, new_contact): ''' Метод добавляющий контакт в серверную и клиентсткую BD. После обновления баз данных обновляет и содержимое окна. 
''' try: self.transport.add_contact(new_contact) except ServerError as err: self.messages.critical(self, 'Ошибка сервера', err.text) except OSError as err: if err.errno: self.messages.critical( self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') else: self.database.add_contact(new_contact) new_contact = QStandardItem(new_contact) new_contact.setEditable(False) self.contacts_model.appendRow(new_contact) logger.info(f'Успешно добавлен контакт {new_contact}') self.messages.information(self, 'Успех', 'Контакт успешно добавлен.') def delete_contact_window(self): '''Метод создающий окно удаления контакта.''' global remove_dialog remove_dialog = DelContactDialog(self.database) remove_dialog.btn_ok.clicked.connect( lambda: self.delete_contact(remove_dialog)) remove_dialog.show() def delete_contact(self, item): ''' Метод удаляющий контакт из серверной и клиентсткой BD. После обновления баз данных обновляет и содержимое окна. ''' selected = item.selector.currentText() try: self.transport.remove_contact(selected) except ServerError as err: self.messages.critical(self, 'Ошибка сервера', err.text) except OSError as err: if err.errno: self.messages.critical( self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') else: self.database.del_contact(selected) self.clients_list_update() logger.info(f'Успешно удалён контакт {selected}') self.messages.information(self, 'Успех', 'Контакт успешно удалён.') item.close() # Если удалён активный пользователь, то деактивируем поля ввода: if selected == self.current_chat: self.current_chat = None self.set_disabled_input() def send_message(self): ''' Функция отправки сообщения текущему собеседнику. Реализует шифрование сообщения и его отправку. ''' # Текст в поле, проверяем что поле не пустое # затем забирается сообщение и поле очищается: message_text = self.ui.text_message.toPlainText() self.ui.text_message.clear() if not message_text: return # Шифруем сообщение ключом получателя и упаковываем в base64: message_text_encrypted = self.encryptor.encrypt( message_text.encode('utf8')) message_text_encrypted_base64 = base64.b64encode( message_text_encrypted) try: self.transport.send_message( self.current_chat, message_text_encrypted_base64.decode('ascii')) pass except ServerError as err: self.messages.critical(self, 'Ошибка', err.text) except OSError as err: if err.errno: self.messages.critical( self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') except (ConnectionResetError, ConnectionAbortedError): self.messages.critical( self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() else: self.database.save_message(self.current_chat, 'out', message_text) logger.debug( f'Отправлено сообщение для {self.current_chat}: {message_text}') self.history_list_update() @pyqtSlot(dict) def message(self, message): ''' Слот обработчик поступаемых сообщений, выполняет дешифровку поступаемых сообщений и их сохранение в истории сообщений. Запрашивает пользователя если пришло сообщение не от текущего собеседника. При необходимости меняет собеседника. 
''' # Получаем строку байтов: encrypted_message = base64.b64decode(message[MESSAGE_TEXT]) # Декодируем строку, при ошибке выдаём сообщение и завершаем функцию try: decrypted_message = self.decrypter.decrypt(encrypted_message) except (ValueError , TypeError): self.messages.warning( self, 'Ошибка', 'Не удалось декодировать сообщение.') return # Сохраняем сообщение в базу и обновляем историю сообщений # или открываем новый чат: self.database.save_message( self.current_chat, 'in', decrypted_message.decode('utf8')) sender = message[SENDER] if sender == self.current_chat: self.history_list_update() else: # Проверим есть ли такой пользователь у нас в контактах: if self.database.check_contact(sender): # Если есть, спрашиваем и желании открыть с ним чат # и открываем при желании: if self.messages.question( self, 'Новое сообщение', f'Получено новое сообщение от {sender}, ' f'открыть чат с ним?', QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes: self.current_chat = sender self.set_active_user() else: print('NO') # Раз нету,спрашиваем хотим ли добавить юзера в контакты: if self.messages.question( self, 'Новое сообщение', f'Получено новое сообщение от {sender}.\n ' f'Данного пользователя нет в вашем контакт-листе.\n ' f'Добавить в контакты и открыть чат с ним?', QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes: self.add_contact(sender) self.current_chat = sender # Нужно заново сохранить сообщение, # иначе оно будет потеряно, # т.к. на момент предыдущего вызова контакта не было: self.database.save_message( self.current_chat, 'in', decrypted_message.decode('utf8')) self.set_active_user() @pyqtSlot() def connection_lost(self): ''' Слот обработчик потери соеднинения с сервером. Выдаёт окно предупреждение и завершает работу приложения. ''' self.messages.warning( self, 'Сбой соединения', 'Потеряно соединение с сервером. ') self.close() @pyqtSlot() def sig_205(self): ''' Слот выполняющий обновление баз данных по команде сервера. ''' if self.current_chat and not self.database.check_user( self.current_chat): self.messages.warning( self, 'Сочувствую', 'К сожалению собеседник был удалён с сервера.') self.set_disabled_input() self.current_chat = None self.clients_list_update() def make_connection(self, trans_obj): '''Метод обеспечивающий соединение сигналов и слотов.''' trans_obj.new_message.connect(self.message) trans_obj.connection_lost.connect(self.connection_lost) trans_obj.message_205.connect(self.sig_205)
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/main_window.py
main_window.py
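`ClientMainWindow.send_message` encrypts outgoing text with the recipient's public RSA key via PKCS1_OAEP and base64-encodes the ciphertext for transport, and the `message` slot reverses the process with the preloaded private key. A self-contained round-trip sketch with pycryptodome, using a throwaway key pair instead of the `{client_name}.key` file:
```
import base64

from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA

# Recipient's key pair; in the client this is loaded from / written to disk
recipient_keys = RSA.generate(2048)

# Sender side (send_message): encrypt with the recipient's exported public key,
# then base64-encode so the ciphertext can travel inside the JSON message
public_key = RSA.import_key(recipient_keys.publickey().export_key())
encryptor = PKCS1_OAEP.new(public_key)
ciphertext = encryptor.encrypt('hello'.encode('utf8'))
wire_text = base64.b64encode(ciphertext).decode('ascii')

# Recipient side (the `message` slot): base64-decode and decrypt
# with the private key held by the decrypter
decrypter = PKCS1_OAEP.new(recipient_keys)
plaintext = decrypter.decrypt(base64.b64decode(wire_text)).decode('utf8')
assert plaintext == 'hello'
```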
import datetime import os from sqlalchemy import create_engine, Column, String, Text, DateTime, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base class ClientDatabase: ''' Класс - оболочка для работы с базой данных клиента. Использует SQLite базу данных, реализован с помощью SQLAlchemy ORM и используется декларативный подход. ''' Base = declarative_base() class KnowUsers(Base): ''' Класс - отображение для таблицы всех пользователей. ''' __tablename__ = 'know_users' id = Column(Integer, primary_key=True) username = Column(String, unique=True) def __init__(self, user): self.id = None self.username = user class MessageHistory(Base): ''' Класс - отображение для таблицы статистики переданных сообщений. ''' __tablename__ = 'message_history' id = Column(Integer, primary_key=True) contact = Column(String) direction = Column(String) message = Column(Text) date = Column(DateTime) def __init__(self, contact, direction, message): self.id = None self.contact = contact self.direction = direction self.message = message self.date = datetime.datetime.now() class Contacts(Base): ''' Класс - отображение для таблицы контактов. ''' __tablename__ = 'contacts' id = Column(Integer, primary_key=True) name = Column(String, unique=True) def __init__(self, contact): self.id = None self.name = contact # Конструктор класса: def __init__(self, name): # Создаём движок базы данных, поскольку разрешено несколько # клиентов одновременно, каждый должен иметь свою БД # Поскольку клиент мультипоточный необходимо отключить # проверки на подключения с разных потоков, # иначе sqlite3.ProgrammingError. # path = os.path.dirname(os.path.realpath(__file__)) path = os.getcwd() filename = f'client_{name}.db3' self.engine = create_engine(f'sqlite:///{os.path.join(path, filename)}', echo=False, pool_recycle=7200, connect_args={'check_same_thread': False}) # Создаём таблицы: self.Base.metadata.create_all(self.engine) # Создаём сессию: Session = sessionmaker(bind=self.engine) self.session = Session() # Очистка таблицы контактов, для подгрузки контактов с сервера: self.session.query(self.Contacts).delete() self.session.commit() def add_contact(self, contact): '''Метод добавляющий контакт в базу данных.''' if not self.session.query( self.Contacts).filter_by( name=contact).count(): contact_row = self.Contacts(contact) self.session.add(contact_row) self.session.commit() def contacts_clear(self): '''Метод очищающий таблицу со списком контактов.''' self.session.query(self.Contacts).delete() def del_contact(self, contact): '''Метод удаляющий определённый контакт.''' self.session.query(self.Contacts).filter_by(name=contact).delete() def add_users(self, users_list): '''Метод заполняющий таблицу известных пользователей.''' self.session.query(self.KnowUsers).delete() for user in users_list: user_row = self.KnowUsers(user) self.session.add(user_row) self.session.commit() def save_message(self, contact, direction, message): '''Метод сохраняющий сообщение в базе данных.''' message_row = self.MessageHistory(contact, direction, message) self.session.add(message_row) self.session.commit() def get_contacts(self): '''Метод возвращающий список всех контактов.''' return [contact[0] for contact in self.session.query(self.Contacts.name).all()] def get_users(self): '''Метод возвращающий список всех известных пользователей.''' return [user[0] for user in self.session.query(self.KnowUsers.username).all()] def check_user(self, user): '''Метод проверяющий существует ли пользователь.''' if 
self.session.query(self.KnowUsers).filter_by(username=user).count(): return True else: return False def check_contact(self, contact): '''Метод проверяющий существует ли контакт.''' if self.session.query(self.Contacts).filter_by(name=contact).count(): return True else: return False def get_history(self, contact): '''Метод возвращающий историю сообщений с определённым пользователем.''' query = self.session.query( self.MessageHistory).filter_by( contact=contact) return [(history_row.contact, history_row.direction, history_row.message, history_row.date) for history_row in query.all()] # Отладка: if __name__ == '__main__': test_db = ClientDatabase('test1') # for i in ['test3', 'test4', 'test5']: # test_db.add_contact(i) # test_db.add_contact('test4') # test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5']) # test_db.save_message('test2', 'in', f'Привет! я тестовое сообщение от {datetime.datetime.now()}!') # test_db.save_message('test2', 'out', f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!') # print(test_db.get_contacts()) # print(test_db.get_users()) # print(test_db.check_user('test1')) # print(test_db.check_user('test10')) print(sorted(test_db.get_history('test2'), key=lambda item: item[3])) # test_db.del_contact('test4') # print(test_db.get_contacts())
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/database.py
database.py
"""Настройки по умолчанию""" import logging # Порт по умолчанию DEFAULT_PORT = 7777 # IP адрес для подключения по умолчанию DEFAULT_IP_ADDRESS = '127.0.0.1' # Максимальная очередь подключения клиента MAX_CONNECTIONS = 5 # Максимальная длина сообщения в байтах MAX_PACKAGE_LENGTH = 10240 # Кодировка проекта ENCODING = 'utf-8' # Текущий уровень логирования LOGGING_LEVEL = logging.DEBUG # База данных для хранения данных сервера SERVER_CONFIG = 'server.ini' # Потокол JIM. Основные ключи ACTION = 'action' TIME = 'time' USER = 'user' ACCOUNT_NAME = 'account_name' SENDER = 'from' DESTINATION = 'to' DATA = 'bin' PUBLIC_KEY = 'pubkey' # Прочие ключи, используемые в протоколе PRESENCE = 'presence' RESPONSE = 'response' ERROR = 'error' MESSAGE = 'message' MESSAGE_TEXT = 'msg_text' EXIT = 'exit' GET_CONTACTS = 'get_contacts' LIST_INFO = 'data_list' REMOVE_CONTACT = 'remove' ADD_CONTACT = 'add' USERS_REQUEST = 'get_users' PUBLIC_KEY_REQUEST = 'pubkey_need' # Словари с ответами # 200 RESPONSE_200 = {RESPONSE: 200} # 202 RESPONSE_202 = { RESPONSE: 202, LIST_INFO: None } # 400 RESPONSE_400 = { RESPONSE: 400, ERROR: None } # 205 RESPONSE_205 = { RESPONSE: 205 } # 511 RESPONSE_511 = { RESPONSE: 511, DATA: None }
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/settings.py
settings.py
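For orientation, the JIM keys defined above translate into wire messages shaped roughly as follows; the account names and payloads are placeholders, and the structures mirror what `ClientTransport.connection_init` and `ClientTransport.send_message` assemble:
```
import time

# Key names as defined in common/settings.py
ACTION, TIME, USER = 'action', 'time', 'user'
ACCOUNT_NAME, PUBLIC_KEY, PRESENCE = 'account_name', 'pubkey', 'presence'
SENDER, DESTINATION, MESSAGE, MESSAGE_TEXT = 'from', 'to', 'message', 'msg_text'

# Presence request opening the authorisation exchange
presence = {
    ACTION: PRESENCE,
    TIME: time.time(),
    USER: {
        ACCOUNT_NAME: 'user1',                        # hypothetical account name
        PUBLIC_KEY: '-----BEGIN PUBLIC KEY-----...'   # exported RSA public key
    }
}

# User-to-user message; the text itself is base64-encoded PKCS1_OAEP ciphertext
message = {
    ACTION: MESSAGE,
    SENDER: 'user1',
    DESTINATION: 'user2',
    TIME: time.time(),
    MESSAGE_TEXT: '<base64 ciphertext>'
}
```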
import sys import logging import socket sys.path.append('../') # Метод определения модуля, источника запуска: if sys.argv[0].find('client') == -1: LOGGER = logging.getLogger('server') else: LOGGER = logging.getLogger('client') def log(func_for_log): ''' Декоратор, выполняющий логирование вызовов функций. Сохраняет события типа debug, содержащие информацию о имени вызываемой функиции, параметры с которыми вызывается функция, и модуль, вызывающий функцию. ''' def log_create(*args, **kwargs): res = func_for_log(*args, **kwargs) LOGGER.debug(f'Вызвана функция {func_for_log.__name__} с параматреми ' f'{args}, {kwargs}. Вызов из модуля ' f'{func_for_log.__module__}') return res return log_create def login_required(func): ''' Декоратор, проверяющий, что клиент авторизован на сервере. Проверяет, что передаваемый объект сокета находится в списке авторизованных клиентов. За исключением передачи словаря-запроса на авторизацию. Если клиент не авторизован, генерирует исключение TypeError ''' def checker(*args, **kwargs): # Проверяем, что первый аргумент - экземпляр MessageProcessor # Импортировать необходимо тут, иначе ошибка рекурсивного импорта. from server.server.core import MessageProcessor from server.common.settings import ACTION, PRESENCE if isinstance(args[0], MessageProcessor): found = False for arg in args: if isinstance(arg, socket.socket): # Проверяем, что данный сокет есть в списке names # класса MessageProcessor for client in args[0].names: if args[0].names[client] == arg: found = True # Теперь надо проверить, что передаваемые аргументы # не presence сообщение for arg in args: if isinstance(arg, dict): if ACTION in arg and arg[ACTION] == PRESENCE: found = True # Если не авторизован и не сообщение начала авторизации, # то вызываем исключение. if not found: raise TypeError return func(*args, **kwargs) return checker
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/decos.py
decos.py
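A small illustration of how the `@log` decorator wraps a function and records each call. The decorator is restated here in simplified form so the snippet runs without the project's logging configuration (the real one selects the 'server' or 'client' logger based on `sys.argv`):
```
import logging
import sys

logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
                    format='%(asctime)s %(levelname)s %(module)s %(message)s')
LOGGER = logging.getLogger('client')


def log(func_for_log):
    """Simplified copy of the decorator above, without the project log setup."""
    def log_create(*args, **kwargs):
        res = func_for_log(*args, **kwargs)
        LOGGER.debug(f'Function {func_for_log.__name__} called with arguments '
                     f'{args}, {kwargs}. Called from module '
                     f'{func_for_log.__module__}')
        return res
    return log_create


@log
def add(a, b):
    return a + b


add(2, 3)  # emits a DEBUG record describing the call, then returns 5
```
Note that the wrapper does not apply `functools.wraps`, so the decorated function's `__name__` becomes `log_create`; the log record still reports the original name because it is captured in the closure.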
class ServerError(Exception):
    '''
    Exception class for handling server errors.
    Requires the error description string received from the server.
    '''

    def __init__(self, text):
        self.text = text

    def __str__(self):
        return self.text
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/errors.py
errors.py
import json
import sys
sys.path.append('../')
from common.settings import MAX_PACKAGE_LENGTH, ENCODING
from common.decos import log


@log
def recv_message(client):
    '''
    Receive a message from a remote host.
    Accepts a JSON message, decodes it and checks that a dictionary
    was received.
    :param client: socket to receive data from.
    :return: dictionary - the message.
    '''
    encoded_response = client.recv(MAX_PACKAGE_LENGTH)
    json_response = encoded_response.decode(ENCODING)
    response = json.loads(json_response)
    if isinstance(response, dict):
        return response
    else:
        raise TypeError


@log
def send_message(sock, message):
    '''
    Send a dictionary through a socket.
    Encodes the dictionary as JSON and sends it over the socket.
    :param sock: socket to send through
    :param message: dictionary to send
    :return: nothing
    '''
    json_message = json.dumps(message)
    encoded_message = json_message.encode(ENCODING)
    sock.send(encoded_message)
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/utils.py
utils.py
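`send_message` and `recv_message` frame every packet as a single UTF-8 encoded JSON object of at most `MAX_PACKAGE_LENGTH` bytes. The round trip below runs locally over `socket.socketpair()`; the two helpers are restated inline so the snippet is self-contained (in the project they come from `common.utils`):
```
import json
import socket

ENCODING = 'utf-8'
MAX_PACKAGE_LENGTH = 10240


def send_message(sock, message):
    # Same framing as common/utils.py: dict -> JSON -> UTF-8 bytes, one packet per message
    sock.send(json.dumps(message).encode(ENCODING))


def recv_message(sock):
    response = json.loads(sock.recv(MAX_PACKAGE_LENGTH).decode(ENCODING))
    if isinstance(response, dict):
        return response
    raise TypeError


# A connected pair of sockets stands in for the client/server connection
client_sock, server_sock = socket.socketpair()
send_message(client_sock, {'action': 'presence', 'time': 0.0})
print(recv_message(server_sock))  # -> {'action': 'presence', 'time': 0.0}
```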
import dis class ServerVerifier(type): ''' Метакласс, проверяющий что в результирующем классе нет клиентских вызовов таких как: connect. Также проверяется, что серверный сокет является TCP и работает по IPv4 протоколу. ''' def __init__(self, clsname, bases, clsdict): # Список методов, которые используются в функциях класса: methods = [] # Атрибуты, вызываемые функциями классов: attrs = [] for func in clsdict: try: ret = dis.get_instructions(clsdict[func]) # Если не функция то ловим исключение: except TypeError: pass else: # Раз функция разбираем код, получая используемые методы и # атрибуты. for instr in ret: if instr.opname == 'LOAD_GLOBAL': if instr.argval not in methods: methods.append(instr.argval) elif instr.opname == 'LOAD_ATTR': if instr.argval not in attrs: attrs.append(instr.argval) # Если обнаружено использование недопустимого метода connect, # генерируем исключение: if 'connect' in methods: raise TypeError('Использование метода connect недопустимо ' 'в серверной части приложения') # Если сокет не инициализировался константами SOCK_STREAM(TCP) # AF_INET(IPv4), тоже исключение. if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs): raise TypeError('Некорректная инициализация сокета.') super().__init__(clsname, bases, clsdict) class ClientVerifier(type): ''' Метакласс, проверяющий что в результирующем классе нет серверных вызовов таких как: accept, listen. Также проверяется, что сокет не создаётся внутри конструктора класса. ''' def __init__(self, clsname, bases, clsdict): # Список методов, которые используются в функциях класса: methods = [] for func in clsdict: try: ret = dis.get_instructions(clsdict[func]) # Если не функция то ловим исключение: except TypeError: pass else: # Раз функция разбираем код, получая используемые методы: for instr in ret: if instr.opname == 'LOAD_GLOBAL': if instr.argval not in methods: methods.append(instr.argval) # Если обнаружено использование недопустимого метода accept, listen, # socket бросаем исключение: for command in ('accept', 'listen', 'socket'): if command in methods: raise TypeError( 'Обнаружено использование недопустимого метода') # Вызов get_message или send_message из utils считаем корректным # использованием сокетов if 'recv_message' in methods or 'send_message' in methods: pass else: raise TypeError( 'Отсутствуют вызовы функций, работающих с сокетами') super().__init__(clsname, bases, clsdict)
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/metaclasses.py
metaclasses.py
import logging import sys import ipaddress import socket SERVER_LOGGER = logging.getLogger('server') class ServerPort: ''' Класс - дескриптор для номера порта. Позволяет использовать только порты с 1023 по 65536. При попытке установить неподходящий номер порта генерирует исключение. ''' def __set__(self, instance, port): if not 1023 < port < 65535: SERVER_LOGGER.critical(f'Попытка запуска с неподходящим номером ' f'порта: {port}. Номер порта должен ' f'находиться в диапозоне от 1024 до 65535') raise TypeError('Некорректный номер порта') instance.__dict__[self.port] = port def __set_name__(self, owner, port): self.port = port class ServerAddress: ''' Класс - дескриптор для ip адреса. Позволяет использовать только ip адреса или имя хоста. При попытке ввода неподходящего адреса генерирует исключение. ''' def __set__(self, instance, value): try: ipaddress.ip_address(socket.gethostbyname(value)) except socket.gaierror: SERVER_LOGGER.critical(f'Попытка запуска с неккоректным ' f'ip адресом: {value}') sys.exit(1) instance.__dict__[self.name] = value def __set_name__(self, owner, name): self.name = name
AASMessenger_Client
/AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/descriptors.py
descriptors.py
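`ServerPort` and `ServerAddress` are data descriptors meant to be declared as class attributes of the server class, so that every assignment to the attribute passes through `__set__` and is validated. A minimal usage sketch with a simplified port descriptor (the real one above also logs through the configured 'server' logger before raising):
```
import logging

logger = logging.getLogger('server')


class ServerPort:
    """Simplified stand-in for the ServerPort descriptor above (no log handlers)."""

    def __set__(self, instance, port):
        if not 1023 < port < 65535:
            logger.critical(f'Attempt to start with an invalid port number: {port}')
            raise TypeError('Invalid port number')
        instance.__dict__[self.port] = port

    def __set_name__(self, owner, port):
        self.port = port


class Server:
    port = ServerPort()  # every assignment to .port is validated by __set__


srv = Server()
srv.port = 7777        # accepted: inside the 1024..65534 range
try:
    srv.port = 80      # rejected: TypeError is raised before the value is stored
except TypeError as err:
    print(err)
```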
from setuptools import setup, find_packages setup(name="AASMessenger_Server", version="1.0.1", description="Messenger_Server", author="Anton Sobolev", author_email="antony.sobolev@gmail.com", packages=find_packages(), install_requires=['PyQt5', 'sqlalchemy', 'pycryptodome'] )
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/setup.py
setup.py
import sys
from cx_Freeze import setup, Executable

build_exe_options = {
    "packages": ["common", "logs", "server", "unit_test"],
}
setup(
    name="messenger_server",
    version="1.0.01",
    description="messenger_server",
    # cx_Freeze reads build_exe settings from the "options" keyword
    options={
        "build_exe": build_exe_options
    },
    executables=[Executable('server.py',
                            base='Win32GUI',
                            targetName='server.exe')]
)
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/setup_server.py
setup_server.py
"""Серверная часть приложения""" import argparse import configparser import os import sys import logging from PyQt5.QtWidgets import QApplication from PyQt5.QtCore import Qt from common.settings import DEFAULT_PORT from log import server_log_config from server.core import MessageProcessor from server.database import ServerDataBase from server.main_window import MainWindow from common.decos import log # Инициализация логирования SERVER_LOGGER = logging.getLogger('server') @log def args_handler(default_port, default_address): '''Парсер аргументов коммандной строки.''' SERVER_LOGGER.debug( f'Инициализация парсера аргументов коммандной строки: {sys.argv}') parser = argparse.ArgumentParser() parser.add_argument('-p', dest='port', type=int, default=default_port, nargs='?') parser.add_argument('-a', dest='ip', default=default_address, nargs='?') parser.add_argument('--no_gui', action='store_true') args = parser.parse_args() listen_address = args.ip listen_port = args.port gui_flag = args.no_gui SERVER_LOGGER.debug('Аргументы успешно загружены.') return listen_address, listen_port, gui_flag @log def config_load(): '''Парсер конфигурационного ini файла.''' config = configparser.ConfigParser() # dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path = os.getcwd() config.read(f"{dir_path}/{'server.ini'}") # Если конфиг файл загружен правильно, запускаемся, # иначе конфиг по умолчанию. if 'SETTINGS' in config: return config else: config.add_section('SETTINGS') config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT)) config.set('SETTINGS', 'Listen_Address', '') config.set('SETTINGS', 'Database_path', '') config.set('SETTINGS', 'Database_file', 'server_database.db3') return config @log def main(): '''Запуск серверного приложения''' # Загрузка файла конфигурации сервера: config = config_load() # Загрузка параметров командной строки, если нет параметров, # то задаём значения по умоланию: listen_address, listen_port, gui_flag = args_handler( config['SETTINGS']['Default_port'], config['SETTINGS']['Listen_Address']) # Инициализация базы данных database = ServerDataBase( os.path.join( config['SETTINGS']['Database_path'], config['SETTINGS']['Database_file'])) # Создание экземпляра класса - сервера и его запуск: server = MessageProcessor(listen_address, listen_port, database) server.deamon = True server.start() # Если указан параметр без GUI то запускаем обработчик # консольного ввода: if gui_flag: while True: command = input('Введите exit для завершения работы сервера.') # Если выход, то завршаем основной цикл сервера: if command == 'exit': server.running = False server.join() break # Если не указан запуск без GUI, то запускаем GUI: else: server_app = QApplication(sys.argv) server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton) main_window = MainWindow(database, server, config) # Запускаем GUI: server_app.exec_() # По закрытию окон останавливаем обработчик сообщений: server.running = False if __name__ == '__main__': main()
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server.py
server.py
"""Настройки журналирования клиентской части приложения""" import sys import os import logging from common.settings import LOGGING_LEVEL sys.path.append('../') # Создаём формировщик логов (formatter): CLIENT_FORMATTER = logging.Formatter( "%(asctime)s %(levelname)s %(module)s %(message)s") # Подготовка имени файла для логирования: # PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.getcwd() PATH = os.path.join(PATH, 'client.log') # Создаём потоки вывода логов: STREAM_HANDLER = logging.StreamHandler(sys.stderr) STREAM_HANDLER.setFormatter(CLIENT_FORMATTER) STREAM_HANDLER.setLevel(logging.ERROR) FILE_HANDLER = logging.FileHandler(PATH, encoding='utf-8') FILE_HANDLER.setFormatter(CLIENT_FORMATTER) # Создаём регистратор и настраиваем его LOGGER = logging.getLogger('client') LOGGER.addHandler(STREAM_HANDLER) LOGGER.addHandler(FILE_HANDLER) LOGGER.setLevel(LOGGING_LEVEL) # Отладка if __name__ == '__main__': LOGGER.critical('critical - тестовый запуск') LOGGER.error('error - тестовый запуск') LOGGER.warning('warning - тестовый запуск') LOGGER.info('info - тестовый запуск') LOGGER.debug('debug - тестовый запуск')
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/log/client_log_config.py
client_log_config.py
"""Настройки журналирования серверной части приложения""" import sys import os import logging.handlers from common.settings import LOGGING_LEVEL sys.path.append('../') # Создаём формировщик логов (formatter): SERVER_FORMATTER = logging.Formatter("%(asctime)s %(levelname)s " "%(module)s %(message)s") # Подготовка имени файла для логирования: # PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.getcwd() PATH = os.path.join(PATH, 'server.log') # Создаём потоки вывода логов: STREAM_HANDLER = logging.StreamHandler(sys.stderr) STREAM_HANDLER.setFormatter(SERVER_FORMATTER) STREAM_HANDLER.setLevel(logging.ERROR) FILE_HANDLER = logging.handlers.TimedRotatingFileHandler(PATH, interval=1, when='d', encoding='utf-8') FILE_HANDLER.setFormatter(SERVER_FORMATTER) # Создаём регистратор и настраиваем его: LOGGER = logging.getLogger('server') LOGGER.addHandler(STREAM_HANDLER) LOGGER.addHandler(FILE_HANDLER) LOGGER.setLevel(LOGGING_LEVEL) # Отладка: if __name__ == '__main__': LOGGER.critical('critical тестовый запуск') LOGGER.error('error тестовый запуск') LOGGER.warning('warning тестовый запуск') LOGGER.info('info тестовый запуск') LOGGER.debug('debug тестовый запуск')
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/log/server_log_config.py
server_log_config.py
from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, \ QFileDialog, QMessageBox from PyQt5.QtCore import Qt import os class ConfigWindow(QDialog): '''Класс окно настроек.''' def __init__(self, config): super().__init__() self.config = config self.initUI() def initUI(self): '''Настройки окна''' self.setFixedSize(365, 260) self.setWindowTitle('Настройки сервера') self.setAttribute(Qt.WA_DeleteOnClose) self.setModal(True) # Надпись о файле базы данных: self.db_path_label = QLabel('Путь до файла базы данных: ', self) self.db_path_label.move(10, 10) self.db_path_label.setFixedSize(240, 15) # Строка с путём базы: self.db_path = QLineEdit(self) self.db_path.setFixedSize(250, 20) self.db_path.move(10, 30) self.db_path.setReadOnly(True) # Кнопка выбора пути: self.db_path_select = QPushButton('Обзор...', self) self.db_path_select.move(275, 28) # Метка с именем поля файла базы данных: self.db_file_label = QLabel('Имя файла базы данных: ', self) self.db_file_label.move(10, 68) self.db_file_label.setFixedSize(180, 15) # Поле для ввода имени файла: self.db_file = QLineEdit(self) self.db_file.move(200, 66) self.db_file.setFixedSize(150, 20) # Метка с номером порта: self.port_label = QLabel('Номер порта для соединений:', self) self.port_label.move(10, 108) self.port_label.setFixedSize(180, 15) # Поле для ввода номера порта: self.port = QLineEdit(self) self.port.move(200, 108) self.port.setFixedSize(150, 20) # Метка с адресом для соединений: self.ip_label = QLabel('С какого IP принимаем соединения:', self) self.ip_label.move(10, 148) self.ip_label.setFixedSize(180, 15) # Метка с напоминанием о пустом поле: self.ip_label_note = QLabel(' оставьте это поле пустым, чтобы\n принимать соединения с любых адресов.', self) self.ip_label_note.move(10, 168) self.ip_label_note.setFixedSize(500, 30) # Поле для ввода ip: self.ip = QLineEdit(self) self.ip.move(200, 148) self.ip.setFixedSize(150, 20) # Кнопка сохранения настроек: self.save_btn = QPushButton('Сохранить', self) self.save_btn.move(190, 220) # Кнопка закрытия окна: self.close_button = QPushButton('Закрыть', self) self.close_button.move(275, 220) self.close_button.clicked.connect(self.close) self.db_path_select.clicked.connect(self.open_file_dialog) self.show() self.db_path.insert(self.config['SETTINGS']['Database_path']) self.db_file.insert(self.config['SETTINGS']['Database_file']) self.port.insert(self.config['SETTINGS']['Default_port']) self.ip.insert(self.config['SETTINGS']['Listen_Address']) self.save_btn.clicked.connect(self.save_server_config) def open_file_dialog(self): '''Метод обработчик открытия окна выбора папки.''' global dialog dialog = QFileDialog(self) path = dialog.getExistingDirectory() path = path.replace('/', '\\') self.db_path.clear() self.db_path.insert(path) def save_server_config(self): ''' Метод сохранения настроек. Проверяет правильность введённых данных и если всё правильно сохраняет ini файл. 
''' global config_window message = QMessageBox() self.config['SETTINGS']['Database_path'] = self.db_path.text() self.config['SETTINGS']['Database_file'] = self.db_file.text() try: port = int(self.port.text()) except ValueError: message.warning(self, 'Ошибка', 'Порт должен быть числом') else: self.config['SETTINGS']['Listen_Address'] = self.ip.text() if 1023 < port < 65536: self.config['SETTINGS']['Default_port'] = str(port) # dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path = os.getcwd() dir_path = os.path.join(dir_path, '..') with open(f"{dir_path}/{'server.ini'}", 'w') as conf: self.config.write(conf) message.information(self, 'OK', 'Настройки успешно сохранены!') else: message.warning(self, 'Ошибка', 'Порт должен быть от 1024 до 65536')
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/config_window.py
config_window.py
from PyQt5.QtWidgets import QDialog, QPushButton, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt


class StatWindow(QDialog):
    '''User statistics window.'''

    def __init__(self, database):
        super().__init__()
        self.database = database
        self.initUI()

    def initUI(self):
        # Window settings:
        self.setWindowTitle('Статистика клиентов')
        self.setFixedSize(600, 700)
        self.setAttribute(Qt.WA_DeleteOnClose)

        # Close window button
        self.close_button = QPushButton('Закрыть', self)
        self.close_button.move(250, 650)
        self.close_button.clicked.connect(self.close)

        # Table holding the statistics themselves
        self.stat_table = QTableView(self)
        self.stat_table.move(10, 10)
        self.stat_table.setFixedSize(580, 620)

        self.create_stat_model()

    def create_stat_model(self):
        '''Method that fills the table with message statistics.'''
        # Records from the database
        stat_list = self.database.message_history()

        # Data model object:
        stat_model = QStandardItemModel()
        stat_model.setHorizontalHeaderLabels(
            ['Имя Клиента', 'Последний раз входил', 'Сообщений отправлено',
             'Сообщений получено'])
        for row in stat_list:
            user, last_seen, sent, recvd = row
            user = QStandardItem(user)
            user.setEditable(False)
            last_seen = QStandardItem(str(last_seen.replace(microsecond=0)))
            last_seen.setEditable(False)
            sent = QStandardItem(str(sent))
            sent.setEditable(False)
            recvd = QStandardItem(str(recvd))
            recvd.setEditable(False)
            stat_model.appendRow([user, last_seen, sent, recvd])
        self.stat_table.setModel(stat_model)
        self.stat_table.resizeColumnsToContents()
        self.stat_table.resizeRowsToContents()
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/stat_window.py
stat_window.py
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
from PyQt5.QtCore import Qt


class DelUserDialog(QDialog):
    '''Dialog for selecting a contact to delete.'''

    def __init__(self, database, server):
        super().__init__()
        self.database = database
        self.server = server

        self.setFixedSize(350, 120)
        self.setWindowTitle('Удаление пользователя')
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setModal(True)

        self.selector_label = QLabel(
            'Выберите пользователя для удаления:', self)
        self.selector_label.setFixedSize(200, 20)
        self.selector_label.move(10, 0)

        self.selector = QComboBox(self)
        self.selector.setFixedSize(200, 20)
        self.selector.move(10, 30)

        self.btn_ok = QPushButton('Удалить', self)
        self.btn_ok.setFixedSize(100, 30)
        self.btn_ok.move(230, 20)
        self.btn_ok.clicked.connect(self.remove_user)

        self.btn_cancel = QPushButton('Отмена', self)
        self.btn_cancel.setFixedSize(100, 30)
        self.btn_cancel.move(230, 60)
        self.btn_cancel.clicked.connect(self.close)

        self.all_users_fill()

    def all_users_fill(self):
        '''Method that fills the list of users.'''
        self.selector.addItems(
            [item[0] for item in self.database.users_list()])

    def remove_user(self):
        '''Handler that performs the user removal.'''
        self.database.remove_user(self.selector.currentText())
        if self.selector.currentText() in self.server.names:
            sock = self.server.names[self.selector.currentText()]
            del self.server.names[self.selector.currentText()]
            self.server.remove_client(sock)
        # Tell the clients that they need to refresh their contact lists
        self.server.service_update_lists()
        self.close()
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/remove_user.py
remove_user.py
import threading import logging import select import socket import json import hmac import binascii import os from common.descriptors import ServerPort, ServerAddress from common.settings import MAX_CONNECTIONS, DESTINATION, SENDER, ACTION, \ PRESENCE, MESSAGE, MESSAGE_TEXT, USER, TIME, ACCOUNT_NAME, ERROR, \ RESPONSE_200, RESPONSE_400, EXIT, GET_CONTACTS, LIST_INFO, ADD_CONTACT, \ RESPONSE_202, REMOVE_CONTACT, PUBLIC_KEY_REQUEST, RESPONSE_511, DATA, \ USERS_REQUEST, RESPONSE, RESPONSE_205, PUBLIC_KEY from common.utils import send_message, recv_message from common.decos import login_required # Инициализация логирования SERVER_LOGGER = logging.getLogger('server') class MessageProcessor(threading.Thread): ''' Основной класс сервера. Принимает содинения, словари - пакеты от клиентов, обрабатывает поступающие сообщения. Работает в качестве отдельного потока. ''' port = ServerPort() addr = ServerAddress() def __init__(self, listen_address, listen_port, database): # Параментры подключения: self.addr = listen_address self.port = listen_port # База данных сервера: self.database = database # Сокет, через который будет осуществляться работа: self.sock = None # Список подключённых клиентов: self.clients = [] # Сокеты: self.listen_sockets = None self.error_sockets = None # Флаг продолжения работы: self.running = True # Словарь содержащий сопоставленные имена и соответствующие им сокеты: self.names = dict() # Конструктор родителя: super().__init__() def run(self): '''Метод основной цикл потока.''' # Инициализация Сокета: self.init_socket() # Основной цикл программы сервера: while self.running: # Ждём подключения, если таймаут вышел, ловим исключение: try: client, client_address = self.sock.accept() except OSError: pass else: SERVER_LOGGER.info(f'Установлено соединение с адресом: ' f'{client_address}') client.settimeout(5) self.clients.append(client) recv_msg_lst = [] send_msg_lst = [] err_lst = [] # Проверяем на наличие ждущих клиентов: try: if self.clients: recv_msg_lst, self.listen_sockets, \ self.error_sockets = select.select( self.clients, self.clients, [], 0) except OSError as err: SERVER_LOGGER.error(f'Ошибка работы с сокетами: {err.errno}') # Принимаем сообщения и если ошибка, исключаем клиента: if recv_msg_lst: for client_with_msg in recv_msg_lst: try: self.client_msg_handler( recv_message(client_with_msg), client_with_msg) except (OSError, json.JSONDecodeError, TypeError): self.remove_client(client_with_msg) def remove_client(self, client): ''' Метод обработчик клиента с которым прервана связь. Ищет клиента и удаляет его из списков и базы: ''' SERVER_LOGGER.info(f'Клиент {client.getpeername()} ' f'отключился от сервера.') for name in self.names: if self.names[name] == client: self.database.user_login(name) del self.names[name] break self.clients.remove(client) client.close() def init_socket(self): '''Метод инициализатор сокета.''' # Готовим сокет: transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM) transport.bind((self.addr, self.port)) transport.settimeout(0.5) # Начинаем слушать сокет: self.sock = transport self.sock.listen(MAX_CONNECTIONS) SERVER_LOGGER.info(f'Запущен сервер с портом для подключений: ' f'{self.port}, ' f'адрес с которого принимаются подключения: ' f'{self.addr}. ' f'Если адрес не указан, соединения будут приниматься ' f'с любых адресов') print('Сервер запущен') def proccess_message(self, message): ''' Метод отправки сообщения клиенту. 
''' if message[DESTINATION] in self.names \ and self.names[message[DESTINATION]] in self.listen_sockets: try: send_message(self.names[message[DESTINATION]], message) SERVER_LOGGER.info(f'Отправлено сообщение пользователю ' f'{message[DESTINATION]} от пользователя ' f'{message[SENDER]}.') except OSError: self.remove_client(message[DESTINATION]) elif message[DESTINATION] in self.names \ and self.names[message[DESTINATION]] not in self.listen_sockets: SERVER_LOGGER.error(f'Связь с клиентом {message[DESTINATION]} ' f'была потеряна. Соединение закрыто, ' f'доставка невозможна.') self.remove_client(self.names[message[DESTINATION]]) else: SERVER_LOGGER.error(f'Пользователь {message[DESTINATION]} ' f'не зарегистрирован на сервере, ' f'отправка сообщения невозможна.') @login_required def client_msg_handler(self, message, client): '''Метод отбработчик поступающих сообщений.''' SERVER_LOGGER.debug(f'Обработка сообщения от клиента: {message}') # Если сообщение о присутствии, принимаем и отвечаем if ACTION in message and message[ACTION] == PRESENCE \ and TIME in message and USER in message: # Если сообщение о присутствии то вызываем функцию авторизации: self.autorize_user(message, client) # Если это сообщение, то добавляем его в очередь: elif ACTION in message and message[ACTION] == MESSAGE \ and DESTINATION in message and TIME in message \ and SENDER in message and MESSAGE_TEXT in message \ and self.names[message[SENDER]] == client: if message[DESTINATION] in self.names: self.database.process_message(message[SENDER], message[DESTINATION]) self.proccess_message(message) try: send_message(client, RESPONSE_200) except OSError: self.remove_client(client) else: response = RESPONSE_400 response[ERROR] = 'Пользователь не зарегистрирован на сервере.' try: send_message(client, response) except OSError: pass return # Если клиент выходит: elif ACTION in message and message[ACTION] == EXIT \ and ACCOUNT_NAME in message \ and self.names[message[ACCOUNT_NAME]] == client: self.remove_client(client) # Если запрос контакт листа: elif ACTION in message and message[ACTION] == GET_CONTACTS \ and USER in message and self.names[message[USER]] == client: response = RESPONSE_202 response[LIST_INFO] = self.database.get_contacts(message[USER]) try: send_message(client, response) except OSError: self.remove_client(client) # Если добаваление контакта: elif ACTION in message and message[ACTION] == ADD_CONTACT and \ ACCOUNT_NAME in message and USER in message \ and self.names[message[USER]] == client: self.database.add_contact(message[USER], message[ACCOUNT_NAME]) try: send_message(client, RESPONSE_200) except OSError: self.remove_client(client) # Если удаление контакта: elif ACTION in message and message[ACTION] == REMOVE_CONTACT \ and ACCOUNT_NAME in message and USER in message \ and self.names[message[USER]] == client: self.database.remove_contact(message[USER], message[ACCOUNT_NAME]) try: send_message(client, RESPONSE_200) except OSError: self.remove_client(client) # Если запрос известных пользователей: elif ACTION in message and message[ACTION] == USERS_REQUEST \ and ACCOUNT_NAME in message \ and self.names[message[ACCOUNT_NAME]] == client: response = RESPONSE_202 response[LIST_INFO] = [user[0] for user in self.database.users_list()] try: send_message(client, response) except OSError: self.remove_client(client) # Если это запрос публичного ключа пользователя: elif ACTION in message and message[ACTION] == PUBLIC_KEY_REQUEST \ and ACCOUNT_NAME in message: response = RESPONSE_511 response[DATA] = 
self.database.get_pubkey(message[ACCOUNT_NAME]) # может быть, что ключа ещё нет (пользователь никогда # не логинился, тогда шлём 400) if response[DATA]: try: send_message(client, response) except OSError: self.remove_client(client) else: response = RESPONSE_400 response[ERROR] = 'Нет публичного ключа ' \ 'для данного пользователя' try: send_message(client, response) except OSError: self.remove_client(client) # Иначе - Bad Request: else: response = RESPONSE_400 response[ERROR] = 'Запрос некорректен.' try: send_message(client, response) except OSError: self.remove_client(client) return def autorize_user(self, message, sock): '''Метод реализующий авторизцию пользователей.''' # Если имя пользователя уже занято то возвращаем 400: if message[USER][ACCOUNT_NAME] in self.names.keys(): response = RESPONSE_400 response[ERROR] = 'Имя пользователя уже занято.' try: send_message(sock, response) except OSError: pass self.clients.remove(sock) sock.close() # Проверяем что пользователь зарегистрирован на сервере: elif not self.database.check_user(message[USER][ACCOUNT_NAME]): response = RESPONSE_400 response[ERROR] = 'Пользователь не зарегистрирован.' try: send_message(sock, response) except OSError: pass self.clients.remove(sock) sock.close() else: # Иначе отвечаем 511 и проводим процедуру авторизации # Словарь - заготовка: message_auth = RESPONSE_511 # Набор байтов в hex представлении: random_str = binascii.hexlify(os.urandom(64)) # В словарь байты нельзя, декодируем (json.dumps -> TypeError): message_auth[DATA] = random_str.decode('ascii') # Создаём хэш пароля и связки с рандомной строкой, # сохраняем серверную версию ключа: hash = hmac.new( self.database.get_hash(message[USER][ACCOUNT_NAME]), random_str) digest = hash.digest() try: # Обмен с клиентом: send_message(sock, message_auth) ans = recv_message(sock) except OSError: sock.close() return client_digest = binascii.a2b_base64(ans[DATA]) # Если ответ клиента корректный, то сохраняем его # в список пользователей: if RESPONSE in ans and ans[RESPONSE] == 511 \ and hmac.compare_digest(digest, client_digest): self.names[message[USER][ACCOUNT_NAME]] = sock client_ip, client_port = sock.getpeername() try: send_message(sock, RESPONSE_200) except OSError: self.remove_client(message[USER][ACCOUNT_NAME]) # Добавляем пользователя в список активных и если # у него изменился открытый ключ сохраняем новый: self.database.user_login(message[USER][ACCOUNT_NAME], client_ip, client_port, message[USER][PUBLIC_KEY]) else: response = RESPONSE_400 response[ERROR] = 'Неверный пароль.' try: send_message(sock, response) except OSError: pass self.clients.remove(sock) sock.close() # Функция - отправляет сервисное сообщение 205 с требованием клиентам # обновить списки: def service_update_lists(self): '''Метод реализующий отправки сервисного сообщения 205 клиентам.''' for client in self.names: try: send_message(self.names[client], RESPONSE_205) except OSError: self.remove_client(self.names[client])
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/core.py
core.py
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, \
    QLabel, QMessageBox
from PyQt5.QtCore import Qt
import hashlib
import binascii


class RegisterUser(QDialog):
    '''Dialog for registering a user on the server.'''

    def __init__(self, database, server):
        super().__init__()
        self.database = database
        self.server = server

        self.setWindowTitle('Регистрация')
        self.setFixedSize(175, 183)
        self.setModal(True)
        self.setAttribute(Qt.WA_DeleteOnClose)

        self.label_username = QLabel('Введите имя пользователя:', self)
        self.label_username.move(10, 10)
        self.label_username.setFixedSize(150, 15)

        self.client_name = QLineEdit(self)
        self.client_name.setFixedSize(154, 20)
        self.client_name.move(10, 30)

        self.label_passwd = QLabel('Введите пароль:', self)
        self.label_passwd.move(10, 55)
        self.label_passwd.setFixedSize(150, 15)

        self.client_passwd = QLineEdit(self)
        self.client_passwd.setFixedSize(154, 20)
        self.client_passwd.move(10, 75)
        self.client_passwd.setEchoMode(QLineEdit.Password)

        self.label_conf = QLabel('Введите подтверждение:', self)
        self.label_conf.move(10, 100)
        self.label_conf.setFixedSize(150, 15)

        self.client_conf = QLineEdit(self)
        self.client_conf.setFixedSize(154, 20)
        self.client_conf.move(10, 120)
        self.client_conf.setEchoMode(QLineEdit.Password)

        self.btn_ok = QPushButton('Сохранить', self)
        self.btn_ok.move(10, 150)
        self.btn_ok.clicked.connect(self.save_data)

        self.btn_cancel = QPushButton('Выход', self)
        self.btn_cancel.move(90, 150)
        self.btn_cancel.clicked.connect(self.close)

        self.messages = QMessageBox()

        self.show()

    def save_data(self):
        '''
        Method that validates the input and, if everything is correct,
        saves the new user to the database.
        '''
        if not self.client_name.text():
            self.messages.critical(self, 'Ошибка',
                                   'Не указано имя пользователя.')
            return
        elif self.client_passwd.text() != self.client_conf.text():
            self.messages.critical(self, 'Ошибка',
                                   'Введённые пароли не совпадают.')
            return
        elif self.database.check_user(self.client_name.text()):
            self.messages.critical(self, 'Ошибка',
                                   'Пользователь уже существует.')
            return
        else:
            passwd_bytes = self.client_passwd.text().encode('utf-8')
            salt = self.client_name.text().lower().encode('utf-8')
            passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt,
                                              10000)
            self.database.add_user(self.client_name.text(),
                                   binascii.hexlify(passwd_hash))
            self.messages.information(self, 'Успех',
                                      'Пользователь успешно зарегистрирован.')
            # Tell the clients that they need to refresh their contact lists
            self.server.service_update_lists()
            self.close()


if __name__ == '__main__':
    app = QApplication([])
    app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
    dial = RegisterUser(None, None)
    app.exec_()
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/add_user.py
add_user.py
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QLabel, QTableView from PyQt5.QtGui import QStandardItemModel, QStandardItem from PyQt5.QtCore import QTimer from server.stat_window import StatWindow from server.config_window import ConfigWindow from server.add_user import RegisterUser from server.remove_user import DelUserDialog class MainWindow(QMainWindow): '''Класс - основное окно сервера.''' def __init__(self, database, server, config): # Конструктор предка super().__init__() # База данных сервера self.database = database self.server_thread = server self.config = config # Ярлык выхода self.exitAction = QAction('Выход', self) self.exitAction.setShortcut('Ctrl+Q') self.exitAction.triggered.connect(qApp.quit) # Кнопка обновить список клиентов self.refresh_button = QAction('Обновить список', self) # Кнопка настроек сервера self.config_btn = QAction('Настройки сервера', self) # Кнопка регистрации пользователя self.register_btn = QAction('Регистрация пользователя', self) # Кнопка удаления пользователя self.remove_btn = QAction('Удаление пользователя' , self) # Кнопка вывести историю сообщений self.show_history_button = QAction('История клиентов', self) # Статусбар self.statusBar() self.statusBar().showMessage('Server Working') # Тулбар self.toolbar = self.addToolBar('MainBar') self.toolbar.addAction(self.exitAction) self.toolbar.addAction(self.refresh_button) self.toolbar.addAction(self.show_history_button) self.toolbar.addAction(self.config_btn) self.toolbar.addAction(self.register_btn) self.toolbar.addAction(self.remove_btn) # Настройки геометрии основного окна self.setFixedSize(800, 600) self.setWindowTitle('Messaging Server alpha release') # Надпись о том, что ниже список подключённых клиентов self.label = QLabel('Список подключённых клиентов:', self) self.label.setFixedSize(240, 15) self.label.move(10, 25) # Окно со списком подключённых клиентов. self.active_clients_table = QTableView(self) self.active_clients_table.move(10, 45) self.active_clients_table.setFixedSize(780, 400) # Таймер, обновляющий список клиентов 1 раз в секунду self.timer = QTimer() self.timer.timeout.connect(self.create_users_model) self.timer.start(1000) # Связываем кнопки с процедурами self.refresh_button.triggered.connect(self.create_users_model) self.show_history_button.triggered.connect(self.show_statistics) self.config_btn.triggered.connect(self.server_config) self.register_btn.triggered.connect(self.reg_user) self.remove_btn.triggered.connect(self.rem_user) # Последним параметром отображаем окно. self.show() def create_users_model(self): '''Метод заполняющий таблицу активных пользователей.''' list_users = self.database.active_users_list() list = QStandardItemModel() list.setHorizontalHeaderLabels( ['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения']) for row in list_users: user, ip, port, time = row user = QStandardItem(user) user.setEditable(False) ip = QStandardItem(ip) ip.setEditable(False) port = QStandardItem(str(port)) port.setEditable(False) # Уберём милисекунды из строки времени, # т.к. такая точность не требуется. 
time = QStandardItem(str(time.replace(microsecond=0))) time.setEditable(False) list.appendRow([user, ip, port, time]) self.active_clients_table.setModel(list) self.active_clients_table.resizeColumnsToContents() self.active_clients_table.resizeRowsToContents() def show_statistics(self): '''Метод создающий окно со статистикой клиентов.''' global stat_window stat_window = StatWindow(self.database) stat_window.show() def server_config(self): '''Метод создающий окно с настройками сервера.''' global config_window # Создаём окно и заносим в него текущие параметры config_window = ConfigWindow(self.config) def reg_user(self): '''Метод создающий окно регистрации пользователя.''' global reg_window reg_window = RegisterUser(self.database , self.server_thread) reg_window.show() def rem_user(self): '''Метод создающий окно удаления пользователя.''' global rem_window rem_window = DelUserDialog(self.database , self.server_thread) rem_window.show()
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/main_window.py
main_window.py
import datetime from sqlalchemy import create_engine, Column, Integer, String, \ ForeignKey, DateTime, Text from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker class ServerDataBase: ''' Класс - оболочка для работы с базой данных сервера. Использует SQLite базу данных, реализован с помощью SQLAlchemy ORM и используется декларативный подход. ''' Base = declarative_base() class AllUsers(Base): '''Класс - отображение таблицы всех пользователей.''' __tablename__ = 'all_users' id = Column(Integer, primary_key=True) name = Column(String, unique=True) last_login = Column(DateTime) passwd_hash = Column(String) pubkey = Column(Text) def __init__(self, username, passwd_hash): self.name = username self.last_login = datetime.datetime.now() self.passwd_hash = passwd_hash self.pubkey = None self.id = None class ActiveUsers(Base): '''Класс - отображение таблицы активных пользователей.''' __tablename__ = 'active_users' id = Column(Integer, primary_key=True) user = Column(String, ForeignKey('all_users.id'), unique=True) ip_address = Column(String) port = Column(Integer) login_time = Column(DateTime) def __init__(self, user_id, ip_address, port, login_time): self.user = user_id self.ip_address = ip_address self.port = port self.login_time = login_time self.id = None class LoginHistory(Base): '''Класс - отображение таблицы истории входов.''' __tablename__ = 'login_history' id = Column(Integer, primary_key=True) name = Column(String, ForeignKey('all_users.id')) date_time = Column(DateTime) ip = Column(String) port = Column(String) def __init__(self, name, date, ip, port): self.id = None self.name = name self.date_time = date self.ip = ip self.port = port class UsersContacts(Base): '''Класс - отображение таблицы контактов пользователей.''' __tablename__ = 'contacts' id = Column(Integer, primary_key=True) user = Column(ForeignKey('all_users.id')) contact = Column(ForeignKey('all_users.id')) def __init__(self, user, contact): self.id = None self.user = user self.contact = contact class UsersHistory(Base): '''Класс - отображение таблицы истории действий.''' __tablename__ = 'history' id = Column(Integer, primary_key=True) user = Column(ForeignKey('all_users.id')) sent = Column(Integer) accepted = Column(Integer) def __init__(self, user): self.id = None self.user = user self.sent = 0 self.accepted = 0 def __init__(self, path): # Создаём движок базы данных: self.engine = create_engine(f'sqlite:///{path}', echo=False, pool_recycle=7200, connect_args={ 'check_same_thread': False}) self.Base.metadata.create_all(self.engine) # Создаём сессию: Session = sessionmaker(bind=self.engine) self.session = Session() # Если в таблице активных пользователей есть записи, # то их необходимо удалить self.session.query(self.ActiveUsers).delete() self.session.commit() def user_login(self, username, ip_address, port, key): ''' Метод выполняющийся при входе пользователя, записывает в базу факт входа. Обновляет открытый ключ пользователя при его изменении. ''' # Запрос в таблицу пользователей на наличие там пользователя с таким # именем rez = self.session.query(self.AllUsers).filter_by(name=username) # Если имя пользователя уже присутствует в таблице, # обновляем время последнего входа и проверяем корректность ключа. 
# Если клиент прислал новый ключ, сохраняем его: if rez.count(): user = rez.first() user.last_login = datetime.datetime.now() if user.pubkey != key: user.pubkey = key # Если нету, то генерируем исключение: else: raise ValueError('Пользователь не заврегистрирован.') # Теперь можно создать запись в таблицу активных пользователей # о факте входа: new_active_user = self.ActiveUsers(user.id, ip_address, port, datetime.datetime.now()) self.session.add(new_active_user) # И сохранить в историю входов: history = self.LoginHistory(user.id, datetime.datetime.now(), ip_address, port) self.session.add(history) self.session.commit() def add_user(self, name, passwd_hash): ''' Метод регистрации пользователя. Принимает имя и хэш пароля, создаёт запись в таблице статистики. ''' user_row = self.AllUsers(name, passwd_hash) self.session.add(user_row) self.session.commit() history_row = self.UsersHistory(user_row.id) self.session.add(history_row) self.session.commit() def remove_user(self, name): '''Метод удаляющий пользователя из базы.''' user = self.session.query(self.AllUsers).filter_by(name=name).first() self.session.query(self.AllUsers).filter_by(user=user.id).delete() self.session.query(self.LoginHistory).filter_by(name=user.id).delete() self.session.query(self.UsersContacts).filter_by(user=user.id).delete() self.session.query( self.UsersContacts).filter_by(contact=user.id).delete() self.session.query(self.UsersHistory).filter_by(user=user.id).delete() self.session.query(self.AllUsers).filter_by(name=name).delete() self.session.commit() def get_hash(self, name): '''Метод получения хэша пароля пользователя.''' user = self.session.query(self.AllUsers).filter_by(name=name).first() return user.passwd_hash def get_pubkey(self, name): '''Метод получения публичного ключа пользователя.''' user = self.session.query(self.AllUsers).filter_by(name=name).first() return user.pubkey def check_user(self, name): '''Метод проверяющий существование пользователя.''' if self.session.query(self.AllUsers).filter_by(name=name).count(): return True else: return False def user_logout(self, username): '''Метод фиксирующий отключения пользователя.''' # Запрашиваем пользователя, что покидает нас: user = self.session.query(self.AllUsers).filter_by( name=username).first() # Удаляем его из таблицы активных пользователей: self.session.query(self.ActiveUsers).filter_by(user=user.id).delete() self.session.commit() def process_message(self, sender, recipient): '''Метод записывающий в таблицу статистики факт передачи сообщения.''' # Получаем ID отправителя и получателя: sender = self.session.query(self.AllUsers).filter_by( name=sender).first().id recipient = self.session.query(self.AllUsers).filter_by( name=recipient).first().id # Запрашиваем строки из истории и увеличиваем счётчики: sender_row = self.session.query(self.UsersHistory).filter_by( user=sender).first() sender_row.sent += 1 recipient_row = self.session.query(self.UsersHistory).filter_by( user=recipient).first() recipient_row.accepted += 1 self.session.commit() # Функция добавляет контакт для пользователя: def add_contact(self, user, contact): '''Метод добавления контакта для пользователя.''' # Получаем ID пользователей user = self.session.query(self.AllUsers).filter_by( name=user).first() contact = self.session.query(self.AllUsers).filter_by( name=contact).first() # Проверяем что не дубль и что контакт может существовать (полю # пользователь мы доверяем): if not contact or self.session.query(self.UsersContacts).filter_by( user=user.id, contact=contact.id).count(): return # 
Создаём объект и заносим его в базу: contact_row = self.UsersContacts(user.id, contact.id) self.session.add(contact_row) self.session.commit() def remove_contact(self, user, contact): '''Метод удаления контакта пользователя.''' # Получаем ID пользователей: user = self.session.query(self.AllUsers).filter_by( name=user).first() contact = self.session.query(self.AllUsers).filter_by( name=contact).first() # Проверяем что контакт может существовать (полю пользователь мы # доверяем): if not contact: return # Удаляем требуемое: self.session.query(self.UsersContacts).filter( self.UsersContacts.user == user.id, self.UsersContacts.contact == contact.id ).delete() self.session.commit() def users_list(self): ''' Метод возвращающий список известных пользователей со временем последнего входа. ''' # Запрос строк таблицы пользователей: query = self.session.query( self.AllUsers.name, self.AllUsers.last_login ) # Возвращаем список кортежей: return query.all() def active_users_list(self): '''Метод возвращающий список активных пользователей.''' # Запрашиваем соединение таблиц # и собираем кортежи имя, адрес, порт, время: query = self.session.query( self.AllUsers.name, self.ActiveUsers.ip_address, self.ActiveUsers.port, self.ActiveUsers.login_time ).join(self.AllUsers) # Возвращаем список кортежей: return query.all() def login_history(self, username=None): '''Метод возвращающий историю входов.''' # Запрашиваем историю входа: query = self.session.query( self.AllUsers.name, self.LoginHistory.date_time, self.LoginHistory.ip, self.LoginHistory.port, ).join(self.AllUsers) # Если было указано имя пользователя, то фильтруем по нему: if username: query = query.filter(self.AllUsers.name == username) # Возвращаем список кортежей: return query.all() def get_contacts(self, username): '''Метод возвращающий список контактов пользователя.''' # Запрашивааем указанного пользователя: user = self.session.query(self.AllUsers).filter_by( name=username).one() # Запрашиваем его список контактов: query = self.session.query(self.UsersContacts, self.AllUsers.name). \ filter_by(user=user.id). \ join(self.AllUsers, self.UsersContacts.contact == self.AllUsers.id) # Выбираем только имена пользователей и возвращаем их: return [contact[1] for contact in query.all()] def message_history(self): '''Метод возвращающий статистику сообщений.''' query = self.session.query( self.AllUsers.name, self.AllUsers.last_login, self.UsersHistory.sent, self.UsersHistory.accepted ).join(self.AllUsers) # Возвращаем список кортежей: return query.all() # Отладка: if __name__ == '__main__': test_db = ServerDataBase('../../server_database.db3') test_db.user_login('test1', '192.168.1.113', 8080, 'jhgfd') test_db.user_login('test2', '192.168.1.113', 8081, 'kjfyd') print(test_db.users_list()) # print(test_db.active_users_list()) # test_db.user_logout('McG') # print(test_db.login_history('re')) # test_db.add_contact('test2', 'test1') # test_db.add_contact('test1', 'test3') # test_db.add_contact('test1', 'test6') # test_db.remove_contact('test1', 'test3') test_db.process_message('test1', 'test2') print(test_db.message_history())
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/database.py
database.py
"""Настройки по умолчанию""" import logging # Порт по умолчанию DEFAULT_PORT = 7777 # IP адрес для подключения по умолчанию DEFAULT_IP_ADDRESS = '127.0.0.1' # Максимальная очередь подключения клиента MAX_CONNECTIONS = 5 # Максимальная длина сообщения в байтах MAX_PACKAGE_LENGTH = 10240 # Кодировка проекта ENCODING = 'utf-8' # Текущий уровень логирования LOGGING_LEVEL = logging.DEBUG # База данных для хранения данных сервера SERVER_CONFIG = 'server.ini' # Потокол JIM. Основные ключи ACTION = 'action' TIME = 'time' USER = 'user' ACCOUNT_NAME = 'account_name' SENDER = 'from' DESTINATION = 'to' DATA = 'bin' PUBLIC_KEY = 'pubkey' # Прочие ключи, используемые в протоколе PRESENCE = 'presence' RESPONSE = 'response' ERROR = 'error' MESSAGE = 'message' MESSAGE_TEXT = 'msg_text' EXIT = 'exit' GET_CONTACTS = 'get_contacts' LIST_INFO = 'data_list' REMOVE_CONTACT = 'remove' ADD_CONTACT = 'add' USERS_REQUEST = 'get_users' PUBLIC_KEY_REQUEST = 'pubkey_need' # Словари с ответами # 200 RESPONSE_200 = {RESPONSE: 200} # 202 RESPONSE_202 = { RESPONSE: 202, LIST_INFO: None } # 400 RESPONSE_400 = { RESPONSE: 400, ERROR: None } # 205 RESPONSE_205 = { RESPONSE: 205 } # 511 RESPONSE_511 = { RESPONSE: 511, DATA: None }
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/settings.py
settings.py
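The keys above are assembled into plain dictionaries on the wire. A minimal sketch of the presence message the server-side handler in core.py checks for (the account name and key value are illustrative, not taken from the package):

import time
from common.settings import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, PUBLIC_KEY

# Hypothetical presence request built from the JIM keys defined in settings.py;
# core.py requires ACTION == PRESENCE plus TIME and USER, and reads
# message[USER][ACCOUNT_NAME] and message[USER][PUBLIC_KEY] during authorisation.
presence = {
    ACTION: PRESENCE,
    TIME: time.time(),
    USER: {
        ACCOUNT_NAME: 'test1',   # illustrative account name
        PUBLIC_KEY: '...',       # the client's RSA public key would go here
    },
}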
import sys
import logging
import socket
sys.path.append('../')

# Determine which module started the application (client or server):
if sys.argv[0].find('client') == -1:
    LOGGER = logging.getLogger('server')
else:
    LOGGER = logging.getLogger('client')


def log(func_for_log):
    '''
    Decorator that logs function calls.
    Records debug-level events containing the name of the called function,
    the parameters it was called with, and the module that called it.
    '''
    def log_create(*args, **kwargs):
        res = func_for_log(*args, **kwargs)
        LOGGER.debug(f'Вызвана функция {func_for_log.__name__} с параметрами '
                     f'{args}, {kwargs}. Вызов из модуля '
                     f'{func_for_log.__module__}')
        return res
    return log_create


def login_required(func):
    '''
    Decorator that checks that the client is authorised on the server.
    Verifies that the socket object being passed in is in the list of
    authorised clients, except when the message is an authorisation request.
    Raises TypeError if the client is not authorised.
    '''
    def checker(*args, **kwargs):
        # Check that the first argument is a MessageProcessor instance.
        # The imports have to live here, otherwise we get a circular import.
        from server.core import MessageProcessor
        from common.settings import ACTION, PRESENCE
        if isinstance(args[0], MessageProcessor):
            found = False
            for arg in args:
                if isinstance(arg, socket.socket):
                    # Check that this socket is in the names dictionary
                    # of the MessageProcessor instance
                    for client in args[0].names:
                        if args[0].names[client] == arg:
                            found = True

            # Also accept the case where one of the arguments
            # is a presence message
            for arg in args:
                if isinstance(arg, dict):
                    if ACTION in arg and arg[ACTION] == PRESENCE:
                        found = True
            # If the client is not authorised and this is not the start
            # of an authorisation, raise an exception.
            if not found:
                raise TypeError
        return func(*args, **kwargs)

    return checker
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/decos.py
decos.py
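A short usage sketch for the @log decorator (the decorated function is hypothetical; the call itself is recorded at DEBUG level on the 'server' or 'client' logger, so it only becomes visible once one of the log/*_log_config modules has configured handlers):

from common.decos import log

@log
def make_greeting(name):
    # arbitrary example function; any callable can be wrapped
    return f'Hello, {name}'

make_greeting('test1')   # the call, its arguments and its module are logged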
class ServerError(Exception):
    '''
    Exception class for handling server errors.
    Requires a string describing the error received from the server.
    '''

    def __init__(self, text):
        self.text = text

    def __str__(self):
        return self.text
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/errors.py
errors.py
import json
import sys
sys.path.append('../')
from common.settings import MAX_PACKAGE_LENGTH, ENCODING
from common.decos import log


@log
def recv_message(client):
    '''
    Function for receiving messages from remote hosts.
    Accepts a JSON message, decodes it and checks that a dictionary
    was received.
    :param client: socket used for the transfer.
    :return: dictionary - the message.
    '''
    encoded_response = client.recv(MAX_PACKAGE_LENGTH)
    json_response = encoded_response.decode(ENCODING)
    response = json.loads(json_response)
    if isinstance(response, dict):
        return response
    else:
        raise TypeError


@log
def send_message(sock, message):
    '''
    Function for sending dictionaries through a socket.
    Encodes the dictionary as JSON and sends it through the socket.
    :param sock: socket used for the transfer
    :param message: dictionary to send
    :return: nothing is returned
    '''
    json_message = json.dumps(message)
    encoded_message = json_message.encode(ENCODING)
    sock.send(encoded_message)
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/utils.py
utils.py
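A minimal, self-contained sketch of the wire format these two helpers implement, exercised over a local socket pair (assumes it is run from the server package root so that the common package is importable):

import socket
from common.utils import send_message, recv_message

# Two connected sockets standing in for a client and the server.
server_sock, client_sock = socket.socketpair()
send_message(client_sock, {'response': 200})   # JSON-encoded and sent as bytes
print(recv_message(server_sock))               # -> {'response': 200}
server_sock.close()
client_sock.close()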
import dis class ServerVerifier(type): ''' Метакласс, проверяющий что в результирующем классе нет клиентских вызовов таких как: connect. Также проверяется, что серверный сокет является TCP и работает по IPv4 протоколу. ''' def __init__(self, clsname, bases, clsdict): # Список методов, которые используются в функциях класса: methods = [] # Атрибуты, вызываемые функциями классов: attrs = [] for func in clsdict: try: ret = dis.get_instructions(clsdict[func]) # Если не функция то ловим исключение: except TypeError: pass else: # Раз функция разбираем код, получая используемые методы и # атрибуты. for instr in ret: if instr.opname == 'LOAD_GLOBAL': if instr.argval not in methods: methods.append(instr.argval) elif instr.opname == 'LOAD_ATTR': if instr.argval not in attrs: attrs.append(instr.argval) # Если обнаружено использование недопустимого метода connect, # генерируем исключение: if 'connect' in methods: raise TypeError('Использование метода connect недопустимо ' 'в серверной части приложения') # Если сокет не инициализировался константами SOCK_STREAM(TCP) # AF_INET(IPv4), тоже исключение. if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs): raise TypeError('Некорректная инициализация сокета.') super().__init__(clsname, bases, clsdict) class ClientVerifier(type): ''' Метакласс, проверяющий что в результирующем классе нет серверных вызовов таких как: accept, listen. Также проверяется, что сокет не создаётся внутри конструктора класса. ''' def __init__(self, clsname, bases, clsdict): # Список методов, которые используются в функциях класса: methods = [] for func in clsdict: try: ret = dis.get_instructions(clsdict[func]) # Если не функция то ловим исключение: except TypeError: pass else: # Раз функция разбираем код, получая используемые методы: for instr in ret: if instr.opname == 'LOAD_GLOBAL': if instr.argval not in methods: methods.append(instr.argval) # Если обнаружено использование недопустимого метода accept, listen, # socket бросаем исключение: for command in ('accept', 'listen', 'socket'): if command in methods: raise TypeError( 'Обнаружено использование недопустимого метода') # Вызов get_message или send_message из utils считаем корректным # использованием сокетов if 'recv_message' in methods or 'send_message' in methods: pass else: raise TypeError( 'Отсутствуют вызовы функций, работающих с сокетами') super().__init__(clsname, bases, clsdict)
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/metaclasses.py
metaclasses.py
import logging
import sys
import ipaddress
import socket

SERVER_LOGGER = logging.getLogger('server')


class ServerPort:
    '''
    Descriptor for the port number.
    Only ports from 1024 to 65535 are allowed; attempting to set an
    unsuitable port number raises an exception.
    '''

    def __set__(self, instance, port):
        if not 1023 < port < 65536:
            SERVER_LOGGER.critical(f'Попытка запуска с неподходящим номером '
                                   f'порта: {port}. Номер порта должен '
                                   f'находиться в диапазоне от 1024 до 65535')
            raise TypeError('Некорректный номер порта')
        instance.__dict__[self.port] = port

    def __set_name__(self, owner, port):
        self.port = port


class ServerAddress:
    '''
    Descriptor for the IP address.
    Only valid IP addresses or host names are allowed; attempting to set
    an unsuitable address terminates the application.
    '''

    def __set__(self, instance, value):
        try:
            ipaddress.ip_address(socket.gethostbyname(value))
        except socket.gaierror:
            SERVER_LOGGER.critical(f'Попытка запуска с некорректным '
                                   f'ip адресом: {value}')
            sys.exit(1)
        instance.__dict__[self.name] = value

    def __set_name__(self, owner, name):
        self.name = name
AASMessenger_Server
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/descriptors.py
descriptors.py
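A sketch of how these descriptors are wired into a class, mirroring their use as class attributes in server/core.py (the Listener class here is hypothetical):

from common.descriptors import ServerPort, ServerAddress

class Listener:
    port = ServerPort()
    addr = ServerAddress()

    def __init__(self, addr, port):
        # Assignment goes through the descriptors' __set__ validation.
        self.addr = addr
        self.port = port

listener = Listener('127.0.0.1', 7777)   # accepted
# Listener('127.0.0.1', 80)              # would raise TypeError: invalid port number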
from setuptools import setup, find_packages

setup(
    name='AAUtility',
    version='3.0.3',
    description='package utility function in common package',
    url='https://github.com/gauravccloud/AAUtility.git',
    author='Gaurav Sharma',
    author_email='gauravdbdev@gmail.com',
    license='MIT',
    packages=['AAUtility'],
    install_requires=[],
    zip_safe=False
)
AAUtility
/AAUtility-3.0.3.tar.gz/AAUtility-3.0.3/setup.py
setup.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#############################################
# File Name: setup.py
# Author: mage
# Mail: mage@woodcol.com
# Created Time: 2018-1-23 19:17:34
#############################################

from setuptools import setup, find_packages

setup(
    name="AAdeepLearning",
    version="1.0.8",
    keywords=("AAdeepLearning", "AA", "deepLearning frame"),
    description="AAdeepLearning is a deep learning frame",
    long_description="AAdeepLearning is a deep learning frame",
    license="MIT Licence",
    url="https://github.com/luojiangtao/aadeeplearning",
    author="luojiangtao",
    author_email="1368761119@qq.com",
    packages=find_packages(),
    include_package_data=True,
    platforms="any",
    install_requires=['numpy']
)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/setup.py
setup.py
def get_default_config():
    # Default configuration
    default_config = {
        # Initial learning rate
        "learning_rate": 0.001,
        # Optimizer: sgd/momentum/rmsprop/adam
        "optimizer": "adam",
        # Momentum coefficient for gradient descent with momentum;
        # 0.9/0.95 is usually fine and rarely needs tuning
        "momentum_coefficient": 0.9,
        # Decay coefficient for the rmsprop optimizer
        "rmsprop_decay": 0.95,
        # Number of training iterations
        "number_iteration": 2000,
        # Number of samples per training batch
        "batch_size": 64,
        # Print progress every this many iterations
        "display": 100,
        # Name used when saving model snapshots
        "save_model": "",
        # Save a snapshot every this many iterations
        "save_iteration": 100,
        # Path to a pre-trained model to load
        "load_model": ""
    }
    return default_config
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/config.py
config.py
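These defaults are merged with a user-supplied config elsewhere in the package (AADeepLearning/aadeeplearning.py does exactly this in its constructor): later keys win, so any user value overrides the default. A short sketch with illustrative override values:

from AADeepLearning.config import get_default_config

user_config = {"learning_rate": 0.01, "batch_size": 32}   # illustrative overrides
config = {**get_default_config(), **user_config}
print(config["learning_rate"], config["optimizer"])       # 0.01 adam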
"""Training-related part of the Keras engine. """ # from __future__ import absolute_import # from __future__ import division # from __future__ import print_function # # import warnings # import copy import time import pickle import numpy as np from .layer.fully_connected import FullyConnected from .layer.dropout import Dropout from .layer.batch_normalization import BatchNormalization from .layer.rnn import RNN from .layer.lstm import LSTM from .activation.relu import Relu from .activation.sigmoid import Sigmoid from .activation.tanh import Tanh from .loss.softmax import SoftMax from .loss.svm import SVM from .layer.convolutional import Convolutional from .layer.pooling import Pooling from .layer.flatten import Flatten from .config import get_default_config class AADeepLearning: """ 入口 """ config = None # 损失值 loss = [] # 训练数据 shape: (60000, 28, 28, 1) (样本数, 宽, 高, 通道数) train_data = [] # 训练数据标签 train_label = [] # 损失值 test_data = [] # 损失值 test_lable = [] # 损失值 input_shape = 0 # 学习率 learning_rate = 0 # 神经网络层数 layer_number = 0 # 神经网络参数 weight和bias net = {} # 缓存loss loss_list = [] # 缓存准确率 accuracy_list = [] def __init__(self, net={}, config={}): """ 初始化 :param net: 网络结构 :param config: 配置项 """ # 合并配置文件,后者覆盖前者 self.config = {**get_default_config(), **config} # 网络结构和定义层一致 self.net = net self.learning_rate = self.config['learning_rate'] self.net = self.init_net(net) self.is_load_model = False if self.config["load_model"] != "": # 加载模型,进行预测或者继续训练 self.reload(self.config["load_model"]) self.is_load_model = True def init_net(self, net): """ 初始化网络所需的对象,方便后期调用,不用每次都重复判断 :param net: 网络结构 :return: 网络结构 """ for i, layer in enumerate(net): if layer['type'] == 'convolutional': net[i]['object'] = Convolutional() elif layer['type'] == 'pooling': net[i]['object'] = Pooling() elif layer['type'] == 'flatten': net[i]['object'] = Flatten() elif layer['type'] == 'fully_connected': net[i]['object'] = FullyConnected() elif layer['type'] == 'dropout': net[i]['object'] = Dropout() elif layer['type'] == 'batch_normalization': net[i]['object'] = BatchNormalization() elif layer['type'] == 'relu': net[i]['object'] = Relu() elif layer['type'] == 'sigmoid': net[i]['object'] = Sigmoid() elif layer['type'] == 'tanh': net[i]['object'] = Tanh() elif layer['type'] == 'rnn': net[i]['object'] = RNN() elif layer['type'] == 'lstm': net[i]['object'] = LSTM() elif layer['type'] == 'softmax': net[i]['object'] = SoftMax() elif layer['type'] == 'svm': net[i]['object'] = SVM() return net def train(self, x_train=None, y_train=None, is_train=True): """ 训练 :param x_train: 数据 :param y_train: 标签 :param is_train: 是否是训练模式 """ if len(x_train.shape) == 4: # 训练立方体数据 例如图片数据 宽*高*通道数 flow_data_shape = { "batch_size": self.config['batch_size'], "channel": x_train.shape[1], "height": x_train.shape[2], "width": x_train.shape[3] } else: # 训练序列数据 样本 * 序列个数 * 序列长度 flow_data_shape = { "batch_size": self.config['batch_size'], "sequence_number": x_train.shape[1], "sequence_length": x_train.shape[2] } # 1,初始化网络参数 if self.is_load_model == False: # 没有载入已训练好的模型,则初始化 self.net = self.init_parameters(flow_data_shape) for iteration in range(1, self.config['number_iteration'] + 1): x_train_batch, y_train_batch = self.next_batch(x_train, y_train, self.config['batch_size']) # 2,前向传播 flow_data = self.forward_pass(self.net, x_train_batch, is_train=is_train) # loss = self.compute_cost(flow_data, y_train_batch) # 3,调用最后一层的计算损失函数,计算损失 loss = self.net[len(self.net)-1]['object'].compute_cost(flow_data, self.net[len(self.net)-1], y_train_batch) self.loss_list.append(loss) # 
4,反向传播,求梯度 self.net = self.backward_pass(self.net, flow_data, y_train_batch) # 梯度检验 # self.gradient_check(x=x_train_batch, y=y_train_batch, net=self.net, layer_name='convolutional_1', weight_key='W', gradient_key='dW') # exit() # 5,根据梯度更新一次参数 self.net = self.update_parameters(self.net, iteration) if iteration % self.config["display"] == 0: # self.check_weight(self.net) _, accuracy = self.predict(x_train_batch, y_train_batch, is_train=is_train) self.accuracy_list.append(accuracy) now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) print(now_time, ' iteration:', iteration, ' loss:', loss, ' accuracy:', accuracy) if self.config["save_model"] != "" and iteration % self.config["save_iteration"] == 0: print('saving model...') self.save(self.config["save_model"] + "-" + str(iteration) + '.model') def init_parameters(self, flow_data_shape): """ 初始化权重和偏置项 :param flow_data_shape: 流动数据形状 :return: 网络结构 """ net = self.net for i, layer in enumerate(net): net[i], flow_data_shape = layer['object'].init(layer=layer, flow_data_shape=flow_data_shape, config=self.config) return net def forward_pass(self, net, x, is_train=False): """ 前向传播 :param net: 网络结构 :param x: 数据 :param is_train: 是否是训练模式 :return: 流动数据 """ # 流动数据,一层一层的计算,并向后流动 flow_data = x for i, layer in enumerate(net): # 缓存当前层的输入 net[i]['input'] = flow_data flow_data, net[i] = layer["object"].forword(flow_data=flow_data, layer=layer, is_train=is_train) # 缓存当前层的输出 net[i]['output'] = flow_data return flow_data def backward_pass(self, net, flow_data, train_label): """ 反向传播 :param net: 网络结构 :param flow_data: 前向传播最后一层输出 :param train_label: 标签 :return: 包含梯度的网络结构 """ layer_number = len(net) for i in reversed(range(0, layer_number)): layer = net[i] if i == len(net)-1: # 最后一层 flow_data = layer["object"].backword(flow_data=flow_data, layer=layer, label=train_label) else: flow_data, net[i] = layer["object"].backword(flow_data=flow_data, layer=layer, config=self.config) return net def update_parameters(self, net, iteration): """ 更新权重,偏置项 :param net: 网络结构 :param iteration: 迭代次数 :return: 更新权重,偏置项后的网络结构 """ for i, layer in enumerate(net): net[i] = layer['object'].update_parameters(layer=layer, config=self.config, iteration=iteration) return net def save(self, path="AA.model"): """ 保存模型 :param path: 路径 """ with open(path, "wb") as f: pickle.dump(self.net, f) def reload(self, path="AA.model"): """ 载入模型 :param path: 路径 """ with open(path, "rb") as f: self.net = pickle.load(f) def predict(self, x_test=None, y_test=None, is_train=False): """ 预测 :param x_test: 预测数据 :param y_test: 预测标签 :param is_train: 是否是训练模式 :return: 概率分布矩阵,准确率 """ # if x_test.shape[0] > 500: # print("Verify the accuracy on " + str(x_test.shape[0]) + " test set, please wait a moment.") flow_data = self.forward_pass(self.net, x_test, is_train) flow_data = np.array(flow_data).T batch_size = y_test.shape[0] right = 0 for i in range(0, batch_size): index = np.argmax(flow_data[i]) if y_test[i][index] == 1: right += 1 accuracy = right / batch_size return flow_data, accuracy def next_batch(self, train_data, train_label, batch_size): """ 随机获取下一批数据 :param train_data: :param train_label: :param batch_size: :return: """ index = [i for i in range(0, len(train_label))] # 洗牌后卷积核个数居然会改变固定位置的图片? 
        np.random.shuffle(index)
        batch_data = []
        batch_label = []
        for i in range(0, batch_size):
            batch_data.append(train_data[index[i]])
            batch_label.append(train_label[index[i]])
        batch_data = np.array(batch_data)
        batch_label = np.array(batch_label)
        return batch_data, batch_label

    def visualization_loss(self):
        """
        Plot the loss curve.
        :return:
        """
        import matplotlib.pyplot as plt
        plt.plot(self.loss_list, 'r')
        plt.xlabel("iteration")
        plt.ylabel("loss")
        plt.show()

    def visualization_accuracy(self):
        """
        Plot the accuracy curve.
        :return:
        """
        import matplotlib.pyplot as plt
        plt.plot(self.accuracy_list, 'g')
        plt.xlabel("display")
        plt.ylabel("accuracy")
        plt.show()

    def check_weight(self, net):
        """
        Inspect gradients: for each layer, print how many entries are smaller than 1e-8.
        :param net: network structure
        :return:
        """
        for i, layer in enumerate(net):
            if layer['type'] == 'fully_connected':
                print(layer["name"], ":dW|<1e-8 :", np.sum(abs(layer['dW']) < 1e-8), "/", layer['dW'].size)
                print(layer['name'] + ":db|<1e-8 :", np.sum(abs(layer['db']) < 1e-8), "/", layer['db'].size)
            elif layer['type'] == 'convolutional':
                print(layer["name"], ":dW|<1e-8 :", np.sum(abs(layer['dW']) < 1e-8), "/", layer['dW'].size)
                print(layer['name'] + ":db|<1e-8 :", np.sum(abs(layer['db']) < 1e-8), "/", layer['db'].size)
            elif layer['type'] == 'rnn':
                print(layer['name'] + ":weight_U_gradient" + str(i) + "|<1e-8 :",
                      np.sum(abs(layer['weight_U_gradient']) < 1e-8), "/", layer['weight_U_gradient'].size)
                print(layer['name'] + ":weight_W_gradient" + str(i) + "|<1e-8 :",
                      np.sum(abs(layer['weight_W_gradient']) < 1e-8), "/", layer['weight_W_gradient'].size)
                print(layer['name'] + ":weight_V_gradient" + str(i) + "|<1e-8 :",
                      np.sum(abs(layer['weight_V_gradient']) < 1e-8), "/", layer['weight_V_gradient'].size)
            elif layer['type'] == 'lstm':
                print(layer['name'] + ":dWf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dWf']) < 1e-8), "/", layer['dWf'].size)
                print(layer['name'] + ":dUf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dUf']) < 1e-8), "/", layer['dUf'].size)
                print(layer['name'] + ":dbf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dbf']) < 1e-8), "/", layer['dbf'].size)
                print(layer['name'] + ":dWi" + str(i) + "|<1e-8 :", np.sum(abs(layer['dWi']) < 1e-8), "/", layer['dWi'].size)
                # the following two lines previously reported dUf/dbf values against dUi/dbi shapes; report dUi and dbi consistently
                print(layer['name'] + ":dUi" + str(i) + "|<1e-8 :", np.sum(abs(layer['dUi']) < 1e-8), "/", layer['dUi'].size)
                print(layer['name'] + ":dbi" + str(i) + "|<1e-8 :", np.sum(abs(layer['dbi']) < 1e-8), "/", layer['dbi'].size)

    def gradient_check(self, x, y, net, layer_name, weight_key, gradient_key, epsilon=1e-4):
        """
        Gradient check: compare the analytic gradient with a numerical approximation.
        :param x: data
        :param y: labels
        :param net: network structure
        :param layer_name: name of the layer to check
        :param weight_key: key of the weight to check
        :param gradient_key: key of the gradient to check
        :param epsilon: step size of the numerical approximation
        """
        # 1. locate the layer and flatten its gradient into a column
        layer_number = -1  # index of the layer to check
        for j, layer in enumerate(net):
            if layer['name'] == layer_name:
                layer_number = j
                break
        assert layer_number != -1
        # analytic gradient as a column vector (n, 1)
        gradient_vector = np.reshape(net[layer_number][gradient_key], (-1, 1))
        # weights as a column vector (n, 1)
        weight_vector = np.reshape(net[layer_number][weight_key], (-1, 1))
        # gradient obtained by numerical approximation
        gradient_vector_approach = np.zeros(gradient_vector.shape)
        length = weight_vector.shape[0]
        # perturb one weight at a time and stack the resulting numerical gradients
        for i in range(length):
            if i % 10 == 0:
                print("gradient checking i/len=", i, "/", length)
            weight_vector_plus = np.copy(weight_vector)
            weight_vector_plus[i][0] = weight_vector_plus[i][0] + epsilon
            net[layer_number][weight_key] = np.reshape(weight_vector_plus, net[layer_number][weight_key].shape)
            # 2. forward pass
            flow_data = self.forward_pass(net=net, x=x)
            # 3. compute the loss
            J_plus_epsilon = net[len(net) - 1]['object'].compute_cost(flow_data, net[len(net) - 1], y)
            weight_vector_minus = np.copy(weight_vector)
            weight_vector_minus[i][0] = weight_vector_minus[i][0] - epsilon
            net[layer_number][weight_key] = np.reshape(weight_vector_minus, net[layer_number][weight_key].shape)
            # 2. forward pass
            flow_data = self.forward_pass(net=net, x=x)
            # 3. compute the loss
            J_minus_epsilon = net[len(net) - 1]['object'].compute_cost(flow_data, net[len(net) - 1], y)
            # centered-difference approximation of the gradient
            gradient_vector_approach[i][0] = (J_plus_epsilon - J_minus_epsilon) / (epsilon * 2)
        # relative Euclidean distance between the analytic and the numerical gradient
        diff = np.sqrt(np.sum((gradient_vector - gradient_vector_approach) ** 2)) / (
                np.sqrt(np.sum((gradient_vector) ** 2)) + np.sqrt(np.sum((gradient_vector_approach) ** 2)))
        # error threshold
        if diff > 1e-4:
            print("Maybe a mistake in your backward pass!!! diff=", diff)
        else:
            print("No problem in your backward pass!!! diff=", diff)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/aadeeplearning.py
aadeeplearning.py
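The gradient_check method above uses a centered difference, (J(w + eps) - J(w - eps)) / (2 * eps), and a relative-distance criterion to validate the backward pass. The following standalone sketch shows the same idea on a toy quadratic loss; the helper name numerical_gradient and the loss function are illustrative and not part of the library.

import numpy as np

def numerical_gradient(loss_fn, w, epsilon=1e-4):
    # centered-difference estimate of dJ/dw, one entry at a time
    grad = np.zeros_like(w)
    for i in range(w.size):
        w_plus = w.copy();  w_plus.flat[i] += epsilon
        w_minus = w.copy(); w_minus.flat[i] -= epsilon
        grad.flat[i] = (loss_fn(w_plus) - loss_fn(w_minus)) / (2 * epsilon)
    return grad

w = np.random.randn(3, 2)
loss = lambda v: 0.5 * np.sum(v ** 2)   # analytic gradient of this loss is w itself
analytic = w
numeric = numerical_gradient(loss, w)
diff = np.linalg.norm(analytic - numeric) / (np.linalg.norm(analytic) + np.linalg.norm(numeric))
print("relative difference:", diff)     # should be well below the 1e-4 threshold used above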
from __future__ import absolute_import

from . import datasets
from .aadeeplearning import AADeepLearning
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/__init__.py
__init__.py
"""Contains the base Layer class, from which all layers inherit. """ from __future__ import print_function from __future__ import absolute_import from __future__ import division import numpy as np class Rmsprop: """ 学习率自适应优化器 """ @staticmethod def update_parameters(layer, keys, learning_rate, decay=0.9): """ 更新参数 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param keys: 需要更新的键名 :param learning_rate: 学习率 :param decay: 衰减系数 :return: 更新后的层 """ # 防止除数为0 epsilon = 1e-8 temp = {} for key in keys: temp["S_d" + key] = np.zeros(layer[key].shape) for key in keys: temp["S_d" + key] = decay * temp["S_d" + key] + (1 - decay) * layer["d" + key] ** 2 layer[key] -= (learning_rate / (np.sqrt(temp["S_d" + key] + epsilon))) * layer["d" + key] return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/optimizer/rmsprop.py
rmsprop.py
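All four optimizers share one convention: every key listed in keys (for example 'W') has its gradient stored on the same layer dict under 'd' + key (for example 'dW'). The snippet below is an illustrative usage sketch with made-up toy values; it assumes the package is importable under the path shown above.

import numpy as np
from AADeepLearning.optimizer.rmsprop import Rmsprop

# toy layer dict following the 'W'/'dW', 'b'/'db' convention
layer = {
    "W": np.ones((2, 2)),
    "dW": np.full((2, 2), 0.5),
    "b": np.zeros((2, 1)),
    "db": np.full((2, 1), 0.1),
}
layer = Rmsprop.update_parameters(layer, keys=["W", "b"], learning_rate=0.01)
print(layer["W"])
print(layer["b"])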
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np


class Momentum:
    """
    Momentum optimizer.
    """
    @staticmethod
    def update_parameters(layer, keys, learning_rate, momentum_coefficient):
        """
        Update parameters.
        :param layer: layer dict holding the weights, biases, gradients, cached forward input/output, instance object, etc.
        :param keys: names of the keys to update
        :param learning_rate: learning rate
        :param momentum_coefficient: momentum coefficient
        :return: updated layer
        """
        for key in keys:
            velocity_key = "V_d" + key
            # keep the velocity on the layer so momentum accumulates across iterations
            if velocity_key not in layer:
                layer[velocity_key] = np.zeros(layer[key].shape)
            layer[velocity_key] = momentum_coefficient * layer[velocity_key] + layer["d" + key]
            layer[key] -= learning_rate * layer[velocity_key]
        return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/optimizer/momentum.py
momentum.py
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np


class Adam:
    """
    Adam optimizer, roughly RMSprop + Momentum.
    """
    @staticmethod
    def update_parameters(layer, keys, learning_rate, iteration):
        """
        Update weights and biases.
        :param layer: layer dict holding the weights, biases, gradients, cached forward input/output, instance object, etc.
        :param keys: names of the keys to update
        :param learning_rate: learning rate
        :param iteration: iteration count, used for bias correction
        :return: updated layer
        """
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 1e-8  # avoid division by zero
        for key in keys:
            v_key = "V_d" + key
            s_key = "S_d" + key
            # keep the first and second moment estimates on the layer so they persist across iterations
            if v_key not in layer:
                layer[v_key] = np.zeros(layer[key].shape)
                layer[s_key] = np.zeros(layer[key].shape)
            layer[v_key] = beta_1 * layer[v_key] + (1 - beta_1) * layer["d" + key]
            layer[s_key] = beta_2 * layer[s_key] + (1 - beta_2) * layer["d" + key] ** 2
            V_corrected = layer[v_key] / (1 - np.power(beta_1, iteration))
            S_corrected = layer[s_key] / (1 - np.power(beta_2, iteration))
            layer[key] -= learning_rate * (V_corrected / np.sqrt(S_corrected + epsilon))
        return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/optimizer/adam.py
adam.py
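The division by (1 - beta^t) in Adam exists because the moment estimates start at zero and are therefore biased toward zero early on. A scalar walk-through with an assumed constant gradient of 0.5 (values chosen only for illustration):

import numpy as np

beta_1, beta_2, lr, eps = 0.9, 0.999, 0.001, 1e-8
v = s = 0.0
grad = 0.5                               # pretend the gradient stays constant
for t in range(1, 4):
    v = beta_1 * v + (1 - beta_1) * grad
    s = beta_2 * s + (1 - beta_2) * grad ** 2
    v_hat = v / (1 - beta_1 ** t)        # bias-corrected first moment: 0.5 from the first step
    s_hat = s / (1 - beta_2 ** t)        # bias-corrected second moment: 0.25 from the first step
    step = lr * v_hat / np.sqrt(s_hat + eps)
    print(t, round(v, 4), round(v_hat, 4), round(step, 6))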
class Sgd:
    """
    Batch gradient descent.
    """
    @staticmethod
    def update_parameters(layer, keys, learning_rate):
        """
        Update parameters.
        :param layer: layer dict holding the weights, biases, gradients, cached forward input/output, instance object, etc.
        :param keys: names of the keys to update
        :param learning_rate: learning rate
        :return: updated layer
        """
        for key in keys:
            layer[key] -= learning_rate * layer["d" + key]
        return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/optimizer/sgd.py
sgd.py
from __future__ import absolute_import

from .adam import Adam
from .momentum import Momentum
from .rmsprop import Rmsprop
from .sgd import Sgd
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/optimizer/__init__.py
__init__.py
import numpy as np class Tanh: """ Tanh 激活函数层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ flow_data = np.tanh(flow_data) return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ # dtanh/dz = 1-a^2 flow_data = flow_data * (1 - np.power(layer["output"], 2)) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/activation/tanh.py
tanh.py
import numpy as np class Sigmoid: """ Sigmoid激活函数层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ flow_data = 1 / (1 + np.exp(-flow_data)) return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ # dsigmoid/dz = a*(1-a) flow_data = flow_data * (layer["output"]*(1-layer["output"])) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/activation/sigmoid.py
sigmoid.py
from .relu import Relu
from .sigmoid import Sigmoid
from .tanh import Tanh
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/activation/__init__.py
__init__.py
import numpy as np class Relu(): """ relu 激活函数层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化,这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ flow_data = np.maximum(0, flow_data) return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ # drelu/dz = 小于零就始终等于0 ,大于0就等于一 flow_data = flow_data * np.array(layer["output"] > 0) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/activation/relu.py
relu.py
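The three activation backward passes above rely on the closed-form derivatives dtanh/dz = 1 - a^2, dsigmoid/dz = a * (1 - a), and drelu/dz = 1 when z > 0 and 0 otherwise. A quick, purely illustrative numerical spot-check of those formulas:

import numpy as np

z = np.array([-1.5, -0.1, 0.2, 2.0])
h = 1e-6

tanh_numeric = (np.tanh(z + h) - np.tanh(z - h)) / (2 * h)
print(np.allclose(tanh_numeric, 1 - np.tanh(z) ** 2, atol=1e-6))

sigmoid = lambda x: 1 / (1 + np.exp(-x))
sig_numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)
print(np.allclose(sig_numeric, sigmoid(z) * (1 - sigmoid(z)), atol=1e-6))

relu = lambda x: np.maximum(0, x)
relu_numeric = (relu(z + h) - relu(z - h)) / (2 * h)
print(np.allclose(relu_numeric, (z > 0).astype(float), atol=1e-6))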
import numpy as np class SoftMax: """ SoftMax层 一般用于最后一层分类 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ # print(flow_data.shape) # todo 不用循环 # for i in range(config['batch_size']): for i in range(flow_data.shape[1]): flow_data[:, i] = np.exp(flow_data[:, i]) / np.sum(np.exp(flow_data[:, i])) return flow_data, layer @staticmethod def compute_cost(flow_data, layer, label): """ 计算代价(交叉熵损失) :param flow_data: 前向传播最后一层输出 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param label: 标签 :return: 损失 """ batch_size = flow_data.shape[1] loss = 0.0 for i in range(batch_size): loss += -np.sum(np.dot(label[i], np.log(flow_data[:, i]))) loss = loss / batch_size return loss @staticmethod def backword(flow_data, layer, label): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ # 获取最末层误差信号 softmax反向传播 return flow_data - label.T @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/loss/softmax.py
softmax.py
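SoftMax works on a (classes, batch) matrix: each column is normalised to a probability distribution, the cross-entropy is averaged over the batch, and the backward signal is simply probabilities minus one-hot labels (flow_data - label.T). A small illustrative check with made-up scores:

import numpy as np

scores = np.array([[2.0, 0.5],
                   [1.0, 1.5],
                   [0.1, 0.3]])                  # (3 classes, 2 samples)
probs = np.exp(scores) / np.sum(np.exp(scores), axis=0, keepdims=True)
print(probs.sum(axis=0))                         # each column sums to 1

labels = np.array([[1, 0, 0],
                   [0, 1, 0]])                   # (batch, classes), one-hot
loss = -np.mean([np.dot(labels[i], np.log(probs[:, i])) for i in range(labels.shape[0])])
print("cross-entropy loss:", loss)
print("gradient w.r.t. scores:\n", probs - labels.T)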
import numpy as np class SVM: """ SVM损失层,又称为Hinge损失函数,一般用于最后一层分类 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播,这里没有操作,直接计算损失 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ # for i in range(config['batch_size']): # for i in range(flow_data.shape[1]): # flow_data[:, i] = np.exp(flow_data[:, i]) / np.sum(np.exp(flow_data[:, i])) return flow_data, layer @staticmethod def compute_cost(flow_data, layer, label): """ 计算代价(SVM损失,又称为Hinge损失) :param flow_data: 前向传播最后一层输出 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param label: 标签 :return: 损失 """ delta = 0.2 if 'delta' in layer.keys(): delta = layer['delta'] flow_data = flow_data.T batch_size = label.shape[0] loss = 0.0 for i in range(batch_size): # loss = max(0, 错误得分 - 正确得分 + delta) # 正确类别索引 right_index = np.argmax(label[i]) # # 正确类别值 positive_x = flow_data[i][right_index] # 代入hinge loss公式 temp = flow_data[i] - positive_x + delta # 剔除正确类里面的值 temp[right_index] = 0 # 小于零就转换为0, 大于零不变 相当于:temp=max(0, temp) temp = temp * np.array(temp > 0) loss += np.sum(temp) loss = loss / batch_size return loss @staticmethod def backword(flow_data, layer, label): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param label: 标签 :return: 流动数据, 更新后的层 """ delta = 0.2 if 'delta' in layer.keys(): delta = layer['delta'] flow_data = flow_data.T batch_size = label.shape[0] output = np.zeros(flow_data.shape) for i in range(batch_size): # loss += -np.sum(np.dot(batch_label[i], np.log(flow_data[:, i]))) # loss = max(0, 错误得分 - 正确得分 + delta) # 正确类别索引 right_index = np.argmax(label[i]) # # 正确类别值 positive_x = flow_data[i][right_index] # 代入hinge loss公式 temp = flow_data[i] - positive_x + delta # 剔除正确类里面的值 temp[right_index] = 0 # 小于零就转换为0, 大于零转行为1, 0 1掩码 temp = np.ones(temp.shape) * np.array(temp > 0) # 正确位置的梯度 temp[right_index] = -np.sum(temp) output[i] = temp # 获取最末层误差信号,反向传播 # print(output[0]) # print(output.shape) # exit() return output.T @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项, 这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/loss/svm.py
svm.py
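SVM.compute_cost implements the multiclass hinge loss: for every wrong class it adds max(0, wrong score - correct score + delta). A single-sample toy computation (scores and label are made up for illustration):

import numpy as np

delta = 0.2
scores = np.array([3.0, 2.9, 0.5])   # class scores for one sample
label = np.array([1, 0, 0])          # correct class is index 0

right_index = np.argmax(label)
margins = scores - scores[right_index] + delta
margins[right_index] = 0             # the correct class contributes nothing
margins = margins * (margins > 0)    # max(0, .)
print("hinge loss:", margins.sum())  # 0.1 from class 1, 0 from class 2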
from .softmax import SoftMax
from .svm import SVM
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/loss/__init__.py
__init__.py
# https://blog.csdn.net/wjc1182511338/article/details/79285503 import numpy as np from .activation.tanh import Tanh from .activation.sigmoid import Sigmoid class LSTM: @staticmethod def init(layer, flow_data_shape): sequence_length = int(flow_data_shape["sequence_length"]) # forget 遗忘门 layer["weight_f"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) # input layer["weight_i"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) # current inputstate layer["weight_c"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) # output layer["weight_o"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) layer["bias_f"] = np.zeros((layer['neurons_number'], 1)) layer["bias_i"] = np.zeros((layer['neurons_number'], 1)) layer["bias_c"] = np.zeros((layer['neurons_number'], 1)) layer["bias_o"] = np.zeros((layer['neurons_number'], 1)) flow_data_shape = { "flatten_size": flow_data_shape["sequence_length"], "batch_size": flow_data_shape["batch_size"] } return layer, flow_data_shape @staticmethod def forword(layer, flow_data): # flow_data = flow_data[0] ht = np.zeros((layer['neurons_number'], flow_data.shape[0])) ct = np.zeros((layer['neurons_number'], flow_data.shape[0])) for i in range(flow_data.shape[1]): xt = flow_data[:, i] ft = Sigmoid.forword(np.dot(layer["weight_f"], np.concatenate(ht, xt)) + layer['bias_f']) it = Sigmoid.forword(np.dot(layer["weight_i"], np.concatenate(ht, xt)) + layer['bias_i']) _ct = Tanh.forword(np.dot(layer["weight_c"], np.concatenate(ht, xt)) + layer['bias_c']) ct = ft * ct + it * _ct ot = Sigmoid.forword(np.dot(layer["weight_o"], np.concatenate(ht, xt)) + layer['bias_o']) ht = ot * Tanh.forword(ct) # 缓存该层的输入 # todo 可能还有 weight_V # layer["weight_V_input"] = h # flow_data = np.dot( layer["weight_V"],h) + layer["bias_V"] # print(flow_data.shape) # exit() # print(flow_data.shape) # exit() return flow_data, layer @staticmethod def backword(flow_data, layer, config): output_all = np.zeros(layer["input"].shape) # print(output_all.shape) # exit() layer["weight_W_gradient"] = np.zeros(layer["weight_W"].shape) layer["weight_U_gradient"] = np.zeros(layer["weight_U"].shape) layer["bias_W_gradient"] = np.zeros(layer["bias_W"].shape) # todo 可能要列相加 layer["bias_V_gradient"] = flow_data layer["weight_V_gradient"] = np.dot(flow_data, layer['weight_V_input'].T) h = np.dot(layer["weight_V"].T, flow_data) for i in reversed(range(0, layer['input'].shape[1])): h = Tanh.backword(h, layer) layer["bias_W_gradient"] += np.sum(h, axis=1, keepdims=True) # print(h.shape) # print(layer["weight_W_input_"+str(i)].T.shape) # print(layer["weight_W_gradient"].shape) # print("----------") # exit() layer["weight_W_gradient"] += np.dot(h, layer["weight_W_input_" + str(i)].T) layer["weight_U_gradient"] += np.dot(h, layer["weight_U_input_" + str(i)]) output_all[:, i] = np.dot(h.T, layer["weight_U"]) h = np.dot(layer["weight_W"].T, h) # print(output_all.shape) # exit() return layer, output_all
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm_3.py
lstm_3.py
import numpy as np from ..optimizer.adam import Adam from ..optimizer.momentum import Momentum from ..optimizer.rmsprop import Rmsprop from ..optimizer.sgd import Sgd class Convolutional: """ 卷积层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ # 何凯明初始化,主要针对relu激活函数 if layer["weight_init"] == 'msra': layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'], layer['kernel_width']) * ( np.sqrt(2 / (flow_data_shape['channel'] * layer['kernel_height'] * layer['kernel_width']))) # xavier,主要针对tanh激活函数 elif layer["weight_init"] == 'xavier': layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'], layer['kernel_width']) * ( np.sqrt(1 / (flow_data_shape['channel'] * layer['kernel_height'] * layer['kernel_width']))) else: layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'], layer['kernel_width']) * 0.01 layer["b"] = np.zeros((layer['kernel_number'], 1, 1, 1)) flow_data_shape = { "batch_size": flow_data_shape['batch_size'], "channel": layer['kernel_number'], "height": ((flow_data_shape['height'] + layer['padding'] * 2 - layer['kernel_height'])) // layer[ 'stride'] + 1, "width": ((flow_data_shape['width'] + layer['padding'] * 2 - layer['kernel_width']) // layer['stride']) + 1 } print(layer['name'] + ",W.shape:", layer["W"].shape) print(layer['name'] + ",b.shape:", layer["b"].shape) return layer, flow_data_shape @staticmethod # 多核也没问题 def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ padding = layer['padding'] if padding != 0: flow_data = Convolutional.padding(flow_data, padding) layer['padding_input'] = flow_data kernel_height = layer['kernel_height'] kernel_width = layer['kernel_width'] batch_size = flow_data.shape[0] output_height = ((flow_data.shape[2] - kernel_width) // layer['stride']) + 1 output_width = ((flow_data.shape[3] - kernel_height) // layer['stride']) + 1 # 卷积输出 output = np.zeros((batch_size, layer['kernel_number'], output_height, output_width)) # 开始卷积 for channel in range(output.shape[1]): # 遍历输出的通道数,输出的通道数等于卷积核的个数 for height in range(output.shape[2]): # 遍历输出的高 for width in range(output.shape[3]): # 遍历输出的宽 # 滑动窗口截取部分 sliding_window = flow_data[:,:, height * layer['stride']:height * layer['stride'] + kernel_height, width * layer['stride']:width * layer['stride'] + kernel_width ] output[:,channel,height,width] = np.sum(np.sum(np.sum((sliding_window * layer["W"][channel]) + layer["b"][channel], axis=2), axis=2), axis=1) return output, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ layer["dW"] = np.zeros(layer['W'].shape) layer["db"] = np.zeros(layer['b'].shape) kernel_height = layer['kernel_height'] kernel_width = layer['kernel_width'] if layer['padding'] != 0: forword_input = layer['padding_input'] else: forword_input = layer['input'] output = np.zeros((forword_input.shape)) for channel in range(flow_data.shape[1]): # 遍历输入梯度的通道数,输入梯度的通道数等于卷积核的个数 for height in range(flow_data.shape[2]): # 遍历输入梯度的高 for width in range(flow_data.shape[3]): # 遍历输入梯度的宽 # 前向传播输入数据,滑动截取窗口 sliding_window = forword_input[:,:, height * layer['stride']:height * layer['stride'] 
+ kernel_height, width * layer['stride']:width * layer['stride'] + kernel_width ] # dx output[:,:, height * layer['stride']:height * layer['stride'] + kernel_height, width * layer['stride']:width * layer['stride'] + kernel_width ] += flow_data[:,channel,height,width].reshape(flow_data.shape[0], 1, 1, 1) * layer['W'][channel] # 单个卷积核梯度 = 前向输入数据的滑动窗口 * 梯度对应通道(卷积核),对应高宽 layer["dW"][channel] += np.mean(flow_data[:,channel,height,width].reshape(flow_data.shape[0], 1, 1, 1) * sliding_window, axis=0) layer["db"][channel][0][0][0] += np.mean(flow_data[:,channel,height,width]) if layer['padding'] != 0: output = Convolutional.delete_padding(output, layer['padding']) return output, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ # 需要更新的键名 keys = ['W', 'b'] if "optimizer" in config.keys() and config["optimizer"] == 'momentum': layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient']) elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop': layer = Rmsprop.update_parameters(layer, keys, config['learning_rate']) elif "optimizer" in config.keys() and config["optimizer"] == 'adam': layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration) else: # 默认使用 sgd layer = Sgd.update_parameters(layer, keys, config['learning_rate']) return layer @staticmethod def padding(flow_data, padding): """ 填充 :param flow_data: 流动数据 :param padding: 填充多少层0 :return: """ padding_flow_data = np.zeros((flow_data.shape[0], flow_data.shape[1], flow_data.shape[2] + padding * 2, flow_data.shape[3] + padding * 2)) for batch in range(flow_data.shape[0]): # 遍历总样本数 for channel in range(flow_data.shape[1]): # 遍历 通道数 # 在二位矩阵外面填充 padding圈零 padding_flow_data[batch][channel] = np.pad(flow_data[batch][channel], ((padding, padding), (padding, padding)), 'constant') return padding_flow_data @staticmethod def delete_padding(flow_data, padding): """ 删除填充 :param flow_data: 流动数据 :param padding: 去掉外面多少层 :return: """ # 定义结构 delete_padding_flow_data = np.zeros((flow_data.shape[0], flow_data.shape[1], flow_data.shape[2] - padding * 2, flow_data.shape[3] - padding * 2)) for batch in range(flow_data.shape[0]): for channel in range(flow_data.shape[1]): height = flow_data[batch][channel].shape[0] width = flow_data[batch][channel].shape[1] # 对应位置复制过来 delete_padding_flow_data[batch][channel] = flow_data[batch][channel][padding:height - padding, padding:width - padding] return delete_padding_flow_data
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/convolutional.py
convolutional.py
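Convolutional.init derives the output spatial size with the usual formula out = (in + 2 * padding - kernel) // stride + 1. A quick illustrative spot-check of that formula (the helper name conv_output_size is not part of the library):

def conv_output_size(size, kernel, stride, padding):
    return (size + 2 * padding - kernel) // stride + 1

# a 28x28 input with a 3x3 kernel, stride 1 and padding 1 keeps its spatial size
print(conv_output_size(28, kernel=3, stride=1, padding=1))   # 28
# the same input with stride 2 and no padding roughly halves it (rounding down)
print(conv_output_size(28, kernel=3, stride=2, padding=0))   # 13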
# 参考:https://www.cnblogs.com/pinard/p/6519110.html import numpy as np from ..optimizer.adam import Adam from ..optimizer.momentum import Momentum from ..optimizer.rmsprop import Rmsprop from ..optimizer.sgd import Sgd class LSTM: @staticmethod def init(layer, flow_data_shape, config): sequence_length = int(flow_data_shape["sequence_length"]) neurons_number = layer['neurons_number'] # 何凯明初始化,主要针对relu激活函数 if layer["weight_init"] == 'msra': layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(2 / neurons_number)) # xavier,主要针对tanh激活函数 elif layer["weight_init"] == 'xavier': layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(1 / neurons_number)) else: layer["Wf"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Uf"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wi"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Ui"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wa"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Ua"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wo"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Uo"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["V"] = np.random.randn(sequence_length, neurons_number) * 0.01 layer["bf"] = np.zeros((neurons_number, 1)) layer["bi"] = np.zeros((neurons_number, 1)) layer["ba"] = np.zeros((neurons_number, 1)) layer["bo"] = np.zeros((neurons_number, 1)) layer["c"] = np.zeros((sequence_length, 1)) flow_data_shape = { "flatten_size": sequence_length, "batch_size": flow_data_shape["batch_size"] } return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): ht = np.zeros((layer['neurons_number'], flow_data.shape[0])) layer["cache_ht_-1"] = ht ct = np.zeros((layer['neurons_number'], flow_data.shape[0])) layer["cache_ct_-1"] = ht for i in range(flow_data.shape[1]): xt = flow_data[:, i] layer["cache_xt_" + str(i)] = xt # 遗忘门 forget ft_1 = np.dot(layer["Wf"], ht) 
layer["cache_ft_1_" + str(i)] = ft_1 ft_2 = np.dot(layer["Uf"], xt.T) layer["cache_ft_2_" + str(i)] = ft_2 ft_3 = ft_1 + ft_2 + layer["bf"] layer["cache_ft_3_" + str(i)] = ft_3 # ft = Sigmoid.forword(ft_3) ft = 1 / (1 + np.exp(-ft_3)) layer["cache_ft_" + str(i)] = ft # 输入门1 input it_1 = np.dot(layer["Wi"], ht) layer["cache_it_1_" + str(i)] = it_1 it_2 = np.dot(layer["Ui"], xt.T) layer["cache_it_2_" + str(i)] = it_2 it_3 = it_1 + it_2 + layer["bi"] layer["cache_it_3_" + str(i)] = it_3 # it = Sigmoid.forword(it_3) it = 1 / (1 + np.exp(-it_3)) layer["cache_it_" + str(i)] = it # 输入门2 at_1 = np.dot(layer["Wa"], ht) layer["cache_at_1_" + str(i)] = at_1 at_2 = np.dot(layer["Ua"], xt.T) layer["cache_at_2_" + str(i)] = at_2 at_3 = at_1 + at_2 + layer["ba"] layer["cache_at_3_" + str(i)] = at_3 # at = Tanh.forword(at_3, layer, is_train) at = np.tanh(at_3) layer["cache_at_" + str(i)] = at # 细胞状态更新 ct_1 = ct * ft layer["cache_ct_1_" + str(i)] = ct_1 ct_2 = it * at layer["cache_ct_2_" + str(i)] = ct_2 ct = ct_1 + ct_2 layer["cache_ct_" + str(i)] = ct ot_1 = np.dot(layer["Wo"], ht) layer["cache_ot_1_" + str(i)] = ot_1 ot_2 = np.dot(layer["Uo"], xt.T) layer["cache_ot_2_" + str(i)] = ot_2 ot_3 = ot_1 + ot_2 + layer["bo"] layer["cache_ot_3_" + str(i)] = ot_3 # ot = Sigmoid.forword(ot_3) ot = 1 / (1 + np.exp(-ot_3)) layer["cache_ot_" + str(i)] = ot # 输出门 # ht_1 = Tanh.forword(ct) ht_1 = np.tanh(ct) layer["cache_ht_1_" + str(i)] = ht_1 ht = ot * ht_1 layer["cache_ht_" + str(i)] = ht flow_data = np.dot(layer["V"], ht) + layer["c"] return flow_data, layer @staticmethod def backword(flow_data, layer, config): sequence_number = layer['input'].shape[1] ct = np.zeros(layer["cache_ct_0"].shape) layer["dc"] = np.sum(flow_data, axis=1, keepdims=True) layer["dV"] = np.dot(flow_data, layer["cache_ht_" + str(sequence_number - 1)].T) ht = np.dot(layer["V"].T, flow_data) output = np.zeros(layer["input"].shape) layer["dbo"] = np.zeros(layer["bo"].shape) layer["dWo"] = np.zeros(layer["Wo"].shape) layer["dUo"] = np.zeros(layer["Uo"].shape) layer["dba"] = np.zeros(layer["ba"].shape) layer["dWa"] = np.zeros(layer["Wa"].shape) layer["dUa"] = np.zeros(layer["Ua"].shape) layer["dbi"] = np.zeros(layer["bi"].shape) layer["dWi"] = np.zeros(layer["Wi"].shape) layer["dUi"] = np.zeros(layer["Ui"].shape) layer["dbf"] = np.zeros(layer["bf"].shape) layer["dWf"] = np.zeros(layer["Wf"].shape) layer["dUf"] = np.zeros(layer["Uf"].shape) for i in reversed(range(0, sequence_number)): ht_1 = ht * layer["cache_ot_" + str(i)] # ct = ct + Tanh.backword(ht_1, layer["cache_ht_1_" + str(i)]) # dtanh/dz = 1-a^2 ct = ct + ht_1 * (1 - np.power(layer["cache_ht_1_" + str(i)], 2)) ct_1 = ct ct = ct_1 * layer["cache_ft_" + str(i)] ot = ht * layer["cache_ht_1_" + str(i)] # ot_3 = Sigmoid.backword(ot, layer["cache_ot_" + str(i)]) # dsigmoid/dz = a*(1-a) ot_3 = ot * (layer["cache_ot_" + str(i)]*(1-layer["cache_ot_" + str(i)])) layer["dbo"] += np.sum(ot_3, axis=1, keepdims=True) layer["dWo"] += np.dot(ot_3, layer["cache_ht_" + str(i)].T) layer["dUo"] += np.dot(ot_3, layer["cache_xt_" + str(i)]) ot_2 = ot_3 ot_1 = ot_3 ct_2 = ct at = ct_2 * layer["cache_it_" + str(i)] # at_3 = Tanh.backword(at, layer["cache_at_" + str(i)]) # dtanh/dz = 1-a^2 at_3 = at * (1 - np.power(layer["cache_at_" + str(i)], 2)) layer["dba"] += np.sum(at_3, axis=1, keepdims=True) layer["dWa"] += np.dot(at_3, layer["cache_ht_" + str(i)].T) layer["dUa"] += np.dot(at_3, layer["cache_xt_" + str(i)]) at_1 = at_3 at_2 = at_3 it = ct_2 * layer["cache_at_" + str(i)] # it_3 = Sigmoid.backword(it, 
layer["cache_it_" + str(i)]) # dsigmoid/dz = a*(1-a) it_3 = ot * (layer["cache_it_" + str(i)]*(1-layer["cache_it_" + str(i)])) layer["dbi"] += np.sum(it_3, axis=1, keepdims=True) layer["dWi"] += np.dot(it_3, layer["cache_ht_" + str(i)].T) layer["dUi"] += np.dot(it_3, layer["cache_xt_" + str(i)]) it_2 = it_3 it_1 = it_3 ft = ct_1 * layer["cache_ct_" + str(i)] # ft_3 = Sigmoid.backword(ft, layer["cache_ft_" + str(i)]) # dsigmoid/dz = a*(1-a) ft_3 = ft * (layer["cache_ft_" + str(i)]*(1-layer["cache_ft_" + str(i)])) layer["dbf"] += np.sum(ft_3, axis=1, keepdims=True) layer["dWf"] += np.dot(ft_3, layer["cache_ht_" + str(i)].T) layer["dUf"] += np.dot(ft_3, layer["cache_xt_" + str(i)]) ft_2 = ft_3 ft_1 = ft_3 xt = np.dot(layer["Uf"].T, ft_2) + np.dot(layer["Ui"].T, it_2) + np.dot(layer["Ua"].T, at_2) + np.dot( layer["Uo"].T, ot_2) ht = np.dot(layer["Wf"].T, ft_1) + np.dot(layer["Wi"].T, it_1) + np.dot(layer["Wa"].T, at_1) + np.dot( layer["Wo"].T, ot_1) output[:, i] = xt.T return output, layer # @staticmethod # def update_parameters(layer, config, iteration): # layer["Wf"] -= config["learning_rate"] * layer["dWf"] # layer["Uf"] -= config["learning_rate"] * layer["dUf"] # layer["Wi"] -= config["learning_rate"] * layer["dWi"] # layer["Ui"] -= config["learning_rate"] * layer["dUi"] # layer["Wa"] -= config["learning_rate"] * layer["dWa"] # layer["Ua"] -= config["learning_rate"] * layer["dUa"] # layer["Wo"] -= config["learning_rate"] * layer["dWo"] # layer["Uo"] -= config["learning_rate"] * layer["dUo"] # layer["V"] -= config["learning_rate"] * layer["dV"] # layer["bf"] -= config["learning_rate"] * layer["dbf"] # layer["bi"] -= config["learning_rate"] * layer["dbi"] # layer["ba"] -= config["learning_rate"] * layer["dba"] # layer["bo"] -= config["learning_rate"] * layer["dbo"] # layer["c"] -= config["learning_rate"] * layer["dc"] # return layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ # 要更新的键名 keys = ['Wf', 'Uf', 'Wi', 'Ui', 'Wa', 'Ua', 'Wo', 'Uo', 'V', 'bf', 'bi', 'ba', 'bo', 'c'] if "optimizer" in config.keys() and config["optimizer"] == 'momentum': layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient']) elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop': layer = Rmsprop.update_parameters(layer, keys, config['learning_rate']) elif "optimizer" in config.keys() and config["optimizer"] == 'adam': layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration) else: # 默认使用 sgd layer = Sgd.update_parameters(layer, keys, config['learning_rate']) return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm.py
lstm.py
import numpy as np class Dropout: """ Dropout层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ if is_train: # 只有训练时才用dropout assert (layer["drop_rate"] >= 0 and layer["drop_rate"] <= 1) mask = np.random.rand(flow_data.shape[0], flow_data.shape[1]) # 生成 0 1 掩码 layer['dropout'] = np.ones((flow_data.shape[0], flow_data.shape[1])) * (np.array(mask > layer["drop_rate"])) # rescale: 输出期望值 = (1 - drop_rate)*原始期望值 / (1 - drop_rate) 保持平均值不变 flow_data = flow_data * layer['dropout'] / (1 - layer["drop_rate"]) return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ # rescale: 输出期望值 = (1 - drop_rate)*原始期望值 / (1 - drop_rate) 保持平均值不变 flow_data = flow_data * layer['dropout'] / (1 - layer["drop_rate"]) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项,这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/dropout.py
dropout.py
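Dropout.forword uses inverted dropout: surviving activations are divided by (1 - drop_rate) so the expected activation stays roughly the same with and without the mask. An illustrative check on random data:

import numpy as np

np.random.seed(0)
drop_rate = 0.3
x = np.random.rand(1000, 64)
mask = (np.random.rand(*x.shape) > drop_rate).astype(float)
dropped = x * mask / (1 - drop_rate)
print(x.mean(), dropped.mean())   # the two means should be close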
import numpy as np class Pooling: """ 池化层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ flow_data_shape = { "batch_size": flow_data_shape['batch_size'], "channel": flow_data_shape['channel'], "height": (flow_data_shape['height'] - layer['kernel_height']) // layer['stride'] + 1, "width": (flow_data_shape['width'] - layer['kernel_width']) // layer['stride'] + 1 } return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ kernel_height = layer['kernel_height'] kernel_width = layer['kernel_width'] batch_size = flow_data.shape[0] channels = flow_data.shape[1] output_width = ((flow_data.shape[2] - kernel_width) // layer['stride']) + 1 output_height = ((flow_data.shape[3] - kernel_height) // layer['stride']) + 1 # 池化总输出 pooling_out = np.zeros((batch_size, channels, output_width, output_height)) # 开始池化 for batch in range(batch_size): # 遍历输出样本数 for channel in range(channels): # 遍历输出通道数 for height in range(output_height): # 遍历输出高 for width in range(output_width): # 遍历输出宽 # 滑动窗口截取部分 sliding_window = flow_data[batch][channel][ height * layer['stride']:height * layer['stride'] + kernel_height, width * layer['stride']:width * layer['stride'] + kernel_width ] if 'mode' in layer.keys() and layer['mode'] == 'average': # 平均池化 pooling_out[batch][channel][height][width] = np.average(sliding_window) else: # 默认取最大值 pooling_out[batch][channel][height][width] = np.max(sliding_window) return pooling_out, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ kernel_height = layer['kernel_height'] kernel_width = layer['kernel_width'] kernel_total = kernel_height*kernel_width stride = layer['stride'] output = np.zeros(layer['input'].shape) batch_size = flow_data.shape[0] # np.savetxt("input.csv", flow_data[0][0], delimiter=',') # np.savetxt("forward_input.csv", layer['input'][0][0], delimiter=',') for batch in range(batch_size): for channel in range(flow_data.shape[1]): for height in range(flow_data.shape[2]): for width in range(flow_data.shape[3]): if 'mode' in layer.keys() and layer['mode'] == 'average': # 平均池化 output[batch][channel][ height * stride:height * stride + kernel_height, width * stride:width * stride + kernel_width ] += flow_data[batch][channel][height][width]/kernel_total else: # 滑动窗口截取部分 sliding_window = layer['input'][batch][channel][ height * stride:height * stride + kernel_height, width * stride:width * stride + kernel_width ] # 默认取最大值 max_height, max_width =np.unravel_index(sliding_window.argmax(), sliding_window.shape) output[batch][channel][max_height+height * stride][max_width+width * stride] += flow_data[batch][channel][height][width] return output, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项,这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/pooling.py
pooling.py
import numpy as np from ..optimizer.adam import Adam from ..optimizer.momentum import Momentum from ..optimizer.rmsprop import Rmsprop from ..optimizer.sgd import Sgd class FullyConnected: """ 全连接层 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ flatten_size = int(flow_data_shape["flatten_size"]) if layer["weight_init"] == 'msra': # 何凯明初始化,主要针对relu激活函数 layer["W"] = np.random.randn(layer['neurons_number'], flatten_size) * (np.sqrt(2 / flatten_size)) elif layer["weight_init"] == 'xavier': # xavier,主要针对tanh激活函数 layer["W"] = np.random.randn(layer['neurons_number'], flatten_size) * (np.sqrt(1 / flatten_size)) else: # 高斯初始化 layer["W"] = np.random.randn(layer['neurons_number'], flatten_size) * 0.01 layer["b"] = np.zeros((layer['neurons_number'], 1)) flow_data_shape = { "flatten_size": layer['neurons_number'], "batch_size": flow_data_shape["batch_size"] } print(layer['name']+",W.shape:", layer["W"].shape) print(layer['name']+",b.shape:", layer["b"].shape) return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ flow_data = np.dot(layer['W'], flow_data) + layer['b'] return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ layer["dW"] = (1 / config['batch_size']) * np.dot(flow_data, layer['input'].T) layer["db"] = (1 / config['batch_size']) * np.sum(flow_data, axis=1, keepdims=True) # dx flow_data = np.dot(layer['W'].T, flow_data) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ # 要更新的键名 keys = ['W', 'b'] if "optimizer" in config.keys() and config["optimizer"] == 'momentum': layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient']) elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop': layer = Rmsprop.update_parameters(layer, keys, config['learning_rate']) elif "optimizer" in config.keys() and config["optimizer"] == 'adam': layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration) else: # 默认使用 sgd layer = Sgd.update_parameters(layer, keys, config['learning_rate']) return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/fully_connected.py
fully_connected.py
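FullyConnected keeps its weights as (neurons_number, flatten_size) and expects the flowing data as (flatten_size, batch_size) columns, so the forward pass is a single matrix product plus a broadcast bias. An illustrative shape check with assumed sizes:

import numpy as np

batch_size, flatten_size, neurons = 4, 784, 128
x = np.random.randn(flatten_size, batch_size)
W = np.random.randn(neurons, flatten_size) * np.sqrt(2 / flatten_size)  # 'msra'-style init
b = np.zeros((neurons, 1))
out = np.dot(W, x) + b
print(out.shape)   # (128, 4)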
# https://blog.csdn.net/flyinglittlepig/article/details/72229041 import numpy as np from .activation.tanh import Tanh from .activation.sigmoid import Sigmoid class LSTM(): @staticmethod def init(layer, flow_data_shape): sequence_length = int(flow_data_shape["sequence_length"]) neurons_number = layer['neurons_number'] # 何凯明初始化,主要针对relu激活函数 if layer["weight_init"] == 'msra': layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number)) layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length)) layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(2 / neurons_number)) # xavier,主要针对tanh激活函数 elif layer["weight_init"] == 'xavier': layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number)) layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length)) layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(1 / neurons_number)) else: layer["Wf"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Uf"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wi"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Ui"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wa"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Ua"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["Wo"] = np.random.randn(neurons_number, neurons_number) * 0.01 layer["Uo"] = np.random.randn(neurons_number, sequence_length) * 0.01 layer["V"] = np.random.randn(sequence_length, neurons_number) * 0.01 layer["bf"] = np.zeros((neurons_number, 1)) layer["bi"] = np.zeros((neurons_number, 1)) layer["ba"] = np.zeros((neurons_number, 1)) layer["bo"] = np.zeros((neurons_number, 1)) layer["c"] = np.zeros((sequence_length, 1)) flow_data_shape = { "flatten_size": sequence_length, "batch_size": flow_data_shape["batch_size"] } return layer, flow_data_shape @staticmethod def forword(layer, flow_data): ht = np.zeros((layer['neurons_number'], flow_data.shape[0])) layer["cache_ht_0"] = ht ct = np.zeros((layer['neurons_number'], flow_data.shape[0])) layer["cache_ct_-1"] = ht for i in range(flow_data.shape[1]): xt = flow_data[:, i] layer["cache_xt_" + str(i)] = xt print(ht.shape) print(xt.shape) exit() ht_xt = np.concatenate(ht, xt) layer["cache_ht_xt_" + str(i)] = ht_xt ft_1 = 
np.dot(layer["Wf"], ht_xt) + layer["bf"] layer["cache_ft_1_" + str(i)] = ft_1 ft = Sigmoid.forword(ft_1) layer["cache_ft_" + str(i)] = ft it_1 = np.dot(layer["Wi"], ht_xt) + layer["bi"] layer["cache_it_1_" + str(i)] = it_1 it = Sigmoid.forword(it_1) layer["cache_it_" + str(i)] = it at_1 = np.dot(layer["Wa"], ht_xt) + layer["ba"] layer["cache_at_1_" + str(i)] = at_1 at = Tanh.forword(at_1) layer["cache_at_" + str(i)] = at ot_1 = np.dot(layer["Wo"], ht_xt) + layer["bo"] layer["cache_ot_1_" + str(i)] = ot_1 ot = Sigmoid.forword(ot_1) layer["cache_ot_" + str(i)] = ot ct_1 = ct * ft layer["cache_ct_1_" + str(i)] = ct_1 ct_2 = it * at layer["cache_ct_2_" + str(i)] = ct_2 ct = ct_1 + ct_2 layer["cache_ct_" + str(i)] = ct ht_1 = Tanh.forword(ct) layer["cache_ht_1_" + str(i)] = ht_1 ht = ot * ht_1 layer["cache_ht_" + str(i)] = ht yt = np.dot(layer["Wy"], ht) + layer["by"] layer["cache_yt"] = yt flow_data = yt # print(flow_data.shape) # exit() # print(flow_data.shape) # exit() return flow_data, layer @staticmethod def backword(flow_data, layer, config): sequence_number = layer['input'].shape[1] layer["dy"] = flow_data layer["dWy"] = np.dot(flow_data, layer["cache_ht_" + str(sequence_number - 1)].T) ht = np.dot(layer["Wy"].T, flow_data) output = np.zeros(layer["input"].shape) layer["dbo"] = np.zeros(layer["bo"].shape) layer["dWo"] = np.zeros(layer["Wo"].shape) layer["dUo"] = np.zeros(layer["Uo"].shape) layer["dba"] = np.zeros(layer["ba"].shape) layer["dWa"] = np.zeros(layer["Wa"].shape) layer["dUa"] = np.zeros(layer["Ua"].shape) layer["dbi"] = np.zeros(layer["bi"].shape) layer["dWi"] = np.zeros(layer["Wi"].shape) layer["dUi"] = np.zeros(layer["Ui"].shape) layer["dbf"] = np.zeros(layer["bf"].shape) layer["dWf"] = np.zeros(layer["Wf"].shape) layer["dUf"] = np.zeros(layer["Uf"].shape) ct = np.zeros(layer["cache_ct_0"].shape) for i in reversed(range(0, sequence_number)): ht_1 = ht * layer["cache_ot_" + str(i)] ct = Tanh.backword(ht_1, layer["cache_ht_1_" + str(i)]) + ct ct_1 = ct ct = ct_1 * layer["cache_ft_" + str(i)] ct_2 = ct ot = ht * layer["cache_ht_1_" + str(i)] ot_1 = Sigmoid.backword(ot, layer["cache_ot_" + str(i)]) layer["dbo"] += np.sum(ot_1, axis=1, keepdims=True) layer["dWo"] += np.dot(ot_1, layer["cache_ht_xt_" + str(i)].T) at = ct_2 * layer["cache_it_" + str(i)] at_1 = Tanh.backword(at, layer["cache_at_" + str(i)]) layer["dba"] += np.sum(at_1, axis=1, keepdims=True) layer["dWa"] += np.dot(at_1, layer["cache_ht_xt_" + str(i)].T) it = ct_2 * layer["cache_at_" + str(i)] it_1 = Sigmoid.backword(it, layer["cache_it_" + str(i)]) layer["dbi"] += np.sum(it_1, axis=1, keepdims=True) layer["dWi"] += np.dot(it_1, layer["cache_ht_xt_" + str(i)].T) ft = ct_1 * layer["cache_ct_" + str(i - 1)] ft_1 = Sigmoid.backword(ft, layer["cache_ft_" + str(i)]) layer["dbf"] += np.sum(ft_1, axis=1, keepdims=True) layer["dWf"] += np.dot(ft_1, layer["cache_ht_xt_" + str(i)].T) # ht_xt = np.dot(layer["Uf"].T, ft_2) + np.dot(layer["Ui"].T, it_2) + np.dot(layer["Ua"].T, at_2) + np.dot( # layer["Uo"].T, ot_2) ht_xt = np.dot(layer["Wf"].T, ft_1) + np.dot(layer["Wi"].T, it_1) + np.dot(layer["Wa"].T, at_1) + np.dot( layer["Wo"].T, ot_1) ht = ht_xt[:ht.shape[0]] xt = ht_xt[ht.shape[0]:] output[:, i] = xt.T return layer, output # 输出单元激活函数 @staticmethod def softmax(x): x = np.array(x) max_x = np.max(x) return np.exp(x - max_x) / np.sum(np.exp(x - max_x))
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm_2.py
lstm_2.py
import numpy as np class BatchNormalization: """ 批归一化 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ flatten_size = int(flow_data_shape["flatten_size"]) layer["gamma"] = np.ones((flatten_size, 1)) layer["beta"] = np.zeros((flatten_size, 1)) print(layer['name'] + ",gamma.shape:", layer["gamma"].shape) print(layer['name'] + ",beta.shape:", layer["beta"].shape) return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :return: 流动数据, 更新后的层 """ epsilon = 1e-8 layer['mean'] = np.mean(flow_data, axis=1, keepdims=True) layer['std'] = np.std(flow_data, axis=1, keepdims=True) layer['norm'] = (flow_data - layer['mean']) / (layer['std'] + epsilon) flow_data = layer["gamma"] * layer['norm'] + layer["beta"] return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config: 配置 :return: 流动数据, 更新后的层 """ epsilon = 1e-8 # gamma 的梯度 layer["dgamma"] = np.sum(flow_data * layer['norm'], axis=1, keepdims=True) # beta 的梯度 layer["dbeta"] = np.sum(flow_data, axis=1, keepdims=True) flow_data = (layer["gamma"] / (layer['std'] + epsilon)) * ( flow_data - layer["dgamma"] * layer['norm'] / config['batch_size'] - np.mean(flow_data, axis=1, keepdims=True)) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新参数 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config: 配置 :param iteration: 迭代次数 :return: """ layer["gamma"] -= config["learning_rate"] * layer["dgamma"] layer["beta"] -= config["learning_rate"] * layer["dbeta"] return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/batch_normalization.py
batch_normalization.py
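BatchNormalization.forword standardises each feature row across the batch axis (axis=1) before applying the learned gamma and beta. A small illustrative check that the normalised rows have roughly zero mean and unit standard deviation:

import numpy as np

epsilon = 1e-8
x = np.random.randn(5, 32) * 3 + 7          # (features, batch), arbitrary scale and shift
norm = (x - x.mean(axis=1, keepdims=True)) / (x.std(axis=1, keepdims=True) + epsilon)
print(norm.mean(axis=1).round(6))           # ~0 per feature
print(norm.std(axis=1).round(6))            # ~1 per feature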
import numpy as np from ..optimizer.adam import Adam from ..optimizer.momentum import Momentum from ..optimizer.rmsprop import Rmsprop from ..optimizer.sgd import Sgd class RNN: @staticmethod def init(layer, flow_data_shape, config): sequence_length = int(flow_data_shape["sequence_length"]) # 何凯明初始化,主要针对relu激活函数 if layer["weight_init"] == 'msra': layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * ( np.sqrt(2 / sequence_length)) layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * ( np.sqrt(2 / layer['neurons_number'])) layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * ( np.sqrt(2 / layer['neurons_number'])) # xavier,主要针对tanh激活函数 elif layer["weight_init"] == 'xavier': layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * ( np.sqrt(1 / sequence_length)) layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * ( np.sqrt(1 / layer['neurons_number'])) layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * ( np.sqrt(1 / layer['neurons_number'])) else: layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * 0.01 layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * 0.01 layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * 0.01 layer["bW"] = np.zeros((layer['neurons_number'], 1)) layer["bV"] = np.zeros((flow_data_shape["sequence_length"], 1)) flow_data_shape = { "flatten_size": flow_data_shape["sequence_length"], "batch_size": flow_data_shape["batch_size"] } return layer, flow_data_shape @staticmethod def forword(layer, flow_data, is_train): # flow_data = flow_data[0] h = np.zeros((layer['neurons_number'], flow_data.shape[0])) for i in range(flow_data.shape[1]): sequence = flow_data[:, i] layer["U_input_" + str(i)] = sequence U_multiply_X = np.dot(layer["U"], sequence.T) layer["W_input_" + str(i)] = h W_multiply_h = np.dot(layer["W"], h) h = U_multiply_X + W_multiply_h h = h + layer["bW"] h = np.tanh(h) layer["tanh_output"] = h # 缓存该层的输入 layer["V_input"] = h flow_data = np.dot(layer["V"], h) + layer["bV"] return flow_data, layer @staticmethod def backword(flow_data, layer, config): output_all = np.zeros(layer["input"].shape) layer["dW"] = np.zeros(layer["W"].shape) layer["dU"] = np.zeros(layer["U"].shape) layer["dbW"] = np.zeros(layer["bW"].shape) layer["dbV"] = np.sum(flow_data, axis=1, keepdims=True) layer["dV"] = np.dot(flow_data, layer['V_input'].T) h = np.dot(layer["V"].T, flow_data) for i in reversed(range(0, layer['input'].shape[1])): # tanh 梯度 h = h * (1 - np.power(layer["tanh_output"], 2)) layer["dbW"] += np.sum(h, axis=1, keepdims=True) layer["dW"] += np.dot(h, layer["W_input_" + str(i)].T) layer["dU"] += np.dot(h, layer["U_input_" + str(i)]) output_all[:, i] = np.dot(h.T, layer["U"]) h = np.dot(layer["W"].T, h) return output_all, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ # 要更新的键名 keys = ['U', 'W', 'V', 'bW', 'bV'] if "optimizer" in config.keys() and config["optimizer"] == 'momentum': layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient']) elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop': layer = Rmsprop.update_parameters(layer, keys, config['learning_rate']) elif 
"optimizer" in config.keys() and config["optimizer"] == 'adam': layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration) else: # 默认使用 sgd layer = Sgd.update_parameters(layer, keys, config['learning_rate']) return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/rnn.py
rnn.py
from .batch_normalization import BatchNormalization
from .convolutional import Convolutional
from .dropout import Dropout
from .flatten import Flatten
from .fully_connected import FullyConnected
from .lstm import LSTM
from .pooling import Pooling
from .rnn import RNN
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/__init__.py
__init__.py
class Flatten: """ 展平数据,一般用于卷积层和全连接层中间 """ @staticmethod def init(layer, flow_data_shape, config): """ 初始化 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param flow_data_shape: 流动数据的形状 :param config:配置 :return: 更新后的层, 流动数据的形状 """ flow_data_shape = { "flatten_size": flow_data_shape["channel"] * flow_data_shape["height"] * flow_data_shape["width"], "batch_size": flow_data_shape["batch_size"] } return layer, flow_data_shape @staticmethod def forword(flow_data, layer, is_train): """ 前向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param is_train: 是否是训练模式 :return: 流动数据, 更新后的层 """ # X_train shape: (60000, 1, 28, 28) ——> (784, 60000) # 流动数据,一层一层的计算,并先后流动 flow_data = flow_data.reshape(flow_data.shape[0], -1).T return flow_data, layer @staticmethod def backword(flow_data, layer, config): """ 反向传播 :param flow_data: 流动数据 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :return: 流动数据, 更新后的层 """ flow_data = flow_data.T # 变化为和前向传播时输入的尺寸一样 flow_data = flow_data.reshape(layer['input'].shape[0], layer['input'].shape[1], layer['input'].shape[2], layer['input'].shape[3]) return flow_data, layer @staticmethod def update_parameters(layer, config, iteration): """ 更新权重和偏置项,这里无操作 :param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息 :param config:配置 :param iteration:迭代次数 :return: 更新后的层 """ return layer
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/flatten.py
flatten.py
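Flatten is a pure reshape: (batch, channel, height, width) becomes (flatten_size, batch) on the way forward, and the backward pass undoes it using the cached input shape. An illustrative round trip with a small stand-in batch:

import numpy as np

x = np.random.randn(8, 1, 28, 28)             # small stand-in for an image batch
flat = x.reshape(x.shape[0], -1).T            # (784, 8)
print(flat.shape)
restored = flat.T.reshape(x.shape)
print(np.array_equal(x, restored))            # True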
# -*- coding: utf-8 -*- """Reuters topic classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .data_utils import get_file # from ..preprocessing.sequence import _remove_long_seq import numpy as np import json import warnings def load_data(path='reuters.npz', num_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3, **kwargs): """Loads the Reuters newswire classification dataset. # Arguments path: where to cache the data (relative to `~/.aadeeplearning/dataset`). num_words: max number of words to include. Words are ranked by how often they occur (in the training set) and only the most frequent words are kept skip_top: skip the top N most frequently occurring words (which may not be informative). maxlen: truncate sequences after this length. test_split: Fraction of the dataset to be used as test data. seed: random seed for sample shuffling. start_char: The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. oov_char: words that were cut out because of the `num_words` or `skip_top` limit will be replaced with this character. index_from: index actual words with this index and higher. # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. Note that the 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped. """ # Legacy support if 'nb_words' in kwargs: warnings.warn('The `nb_words` argument in `load_data` ' 'has been renamed `num_words`.') num_words = kwargs.pop('nb_words') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) path = get_file(path, origin='https://s3.amazonaws.com/text-datasets/reuters.npz', file_hash='87aedbeb0cb229e378797a632c1997b6') with np.load(path) as f: xs, labels = f['x'], f['y'] np.random.seed(seed) indices = np.arange(len(xs)) np.random.shuffle(indices) xs = xs[indices] labels = labels[indices] if start_char is not None: xs = [[start_char] + [w + index_from for w in x] for x in xs] elif index_from: xs = [[w + index_from for w in x] for x in xs] # if maxlen: # xs, labels = _remove_long_seq(maxlen, xs, labels) if not num_words: num_words = max([max(x) for x in xs]) # by convention, use 2 as OOV word # reserve 'index_from' (=3 by default) characters: # 0 (padding), 1 (start), 2 (OOV) if oov_char is not None: xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs] else: xs = [[w for w in x if skip_top <= w < num_words] for x in xs] idx = int(len(xs) * (1 - test_split)) x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx]) x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:]) return (x_train, y_train), (x_test, y_test) def get_word_index(path='reuters_word_index.json'): """Retrieves the dictionary mapping words to word indices. # Arguments path: where to cache the data (relative to `~/.aadeeplearning/dataset`). # Returns The word index dictionary. """ path = get_file( path, origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json', file_hash='4d44cc38712099c9e383dc6e5f11a921') f = open(path) data = json.load(f) f.close() return data
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/reuters.py
reuters.py
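A short usage sketch for the Reuters loader, assuming the module is importable from the path in the record above and that the hard-coded S3 URLs are still reachable; the index shift of 3 follows the `index_from` default documented in the loader.

```python
from AADeepLearning.datasets import reuters  # import path assumed from the record above

# Keep the 10,000 most frequent words; everything else maps to the OOV index (2).
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000)

# Rebuild a readable newswire: indices 0-2 are reserved for padding/start/OOV,
# so the stored word index is shifted by `index_from` (3 by default).
word_index = reuters.get_word_index()
index_to_word = {v + 3: k for k, v in word_index.items()}
index_to_word[0], index_to_word[1], index_to_word[2] = '<pad>', '<start>', '<oov>'
print(' '.join(index_to_word.get(i, '<oov>') for i in x_train[0]))
print('label:', y_train[0])
```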
"""Utilities for file download and caching.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import multiprocessing as mp import os import random import shutil import sys import tarfile import threading import time import warnings import zipfile from abc import abstractmethod from contextlib import closing from multiprocessing.pool import ThreadPool import numpy as np import six from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError from six.moves.urllib.request import urlopen try: import queue except ImportError: import Queue as queue from .generic_utils import Progbar if sys.version_info[0] == 2: def urlretrieve(url, filename, reporthook=None, data=None): """Replacement for `urlretrive` for Python 2. Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. # Arguments url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: `data` argument passed to `urlopen`. """ def chunk_read(response, chunk_size=8192, reporthook=None): content_type = response.info().get('Content-Length') total_size = -1 if content_type is not None: total_size = int(content_type.strip()) count = 0 while True: chunk = response.read(chunk_size) count += 1 if reporthook is not None: reporthook(count, chunk_size, total_size) if chunk: yield chunk else: break with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd: for chunk in chunk_read(response, reporthook=reporthook): fd.write(chunk) else: from six.moves.urllib.request import urlretrieve def _extract_archive(file_path, path='.', archive_format='auto'): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. # Arguments file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. # Returns True if a match was found and an archive extraction was completed, False otherwise. """ if archive_format is None: return False if archive_format is 'auto': archive_format = ['tar', 'zip'] if isinstance(archive_format, six.string_types): archive_format = [archive_format] for archive_type in archive_format: if archive_type is 'tar': open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type is 'zip': open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False def get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None): """Downloads a file from a URL if it not already in the cache. 
By default the file at the url `origin` is downloaded to the cache_dir `~/.aadeeplearning`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.aadeeplearning/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. # Arguments fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the AADeepLearning cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [AADeepLearning Directory](/faq/#where-is-the-aadeeplearning-configuration-filed-stored). # Returns Path to the downloaded file """ # noqa if cache_dir is None: cache_dir = os.path.join(os.path.expanduser('~'), '.aadeeplearning') if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = 'md5' datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join('/tmp', '.aadeeplearning') datadir = os.path.join(datadir_base, cache_subdir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. if file_hash is not None: if not validate_file(fpath, file_hash, algorithm=hash_algorithm): print('A local file was found, but it seems to be ' 'incomplete or outdated because the ' + hash_algorithm + ' file hash does not match the original value of ' + file_hash + ' so we will re-download the data.') download = True else: download = True if download: print('Downloading data from', origin) class ProgressTracker(object): # Maintain progbar for the lifetime of download. # This design was chosen for Python 2.7 compatibility. 
progbar = None def dl_progress(count, block_size, total_size): if ProgressTracker.progbar is None: if total_size is -1: total_size = None ProgressTracker.progbar = Progbar(total_size) else: ProgressTracker.progbar.update(count * block_size) error_msg = 'URL fetch failure on {}: {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt): if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format='tar') return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath def _hash_file(fpath, algorithm='sha256', chunk_size=65535): """Calculates a file sha256 or md5 hash. # Example ```python >>> from aadeeplearning.data_utils import _hash_file >>> _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` # Arguments fpath: path to the file being validated algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. # Returns The file hash """ if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest() def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535): """Validates a file against a sha256 or md5 hash. # Arguments fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. # Returns Whether the file is valid """ if ((algorithm is 'sha256') or (algorithm is 'auto' and len(file_hash) is 64)): hasher = 'sha256' else: hasher = 'md5' if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False class Sequence(object): """Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implement the `__getitem__` and the `__len__` methods. If you want to modify your dataset between epochs you may implement `on_epoch_end`. The method `__getitem__` should return a complete batch. # Notes `Sequence` are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. # Examples ```python from skimage.io import imread from skimage.transform import resize import numpy as np # Here, `x_set` is list of path to the images # and `y_set` are the associated classes. 
class CIFAR10Sequence(Sequence): def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): return int(np.ceil(len(self.x) / float(self.batch_size))) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ``` """ @abstractmethod def __getitem__(self, index): """Gets batch at position `index`. # Arguments index: position of the batch in the Sequence. # Returns A batch """ raise NotImplementedError @abstractmethod def __len__(self): """Number of batch in the Sequence. # Returns The number of batches in the Sequence. """ raise NotImplementedError def on_epoch_end(self): """Method called at the end of every epoch. """ pass def __iter__(self): """Create a generator that iterate over the Sequence.""" for item in (self[i] for i in range(len(self))): yield item # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. _SEQUENCE_COUNTER = None def init_pool(seqs): global _SHARED_SEQUENCES _SHARED_SEQUENCES = seqs def get_index(uid, i): """Get the value from the Sequence `uid` at index `i`. To allow multiple Sequences to be used at the same time, we use `uid` to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. # Arguments uid: int, Sequence identifier i: index # Returns The value at index `i`. """ return _SHARED_SEQUENCES[uid][i] class SequenceEnqueuer(object): """Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. # Examples ```python enqueuer = SequenceEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.close() ``` The `enqueuer.get()` should be an infinite stream of datas. """ def __init__(self, sequence, use_multiprocessing=False): self.sequence = sequence self.use_multiprocessing = use_multiprocessing global _SEQUENCE_COUNTER if _SEQUENCE_COUNTER is None: try: _SEQUENCE_COUNTER = mp.Value('i', 0) except OSError: # In this case the OS does not allow us to use # multiprocessing. We resort to an int # for enqueuer indexing. _SEQUENCE_COUNTER = 0 if isinstance(_SEQUENCE_COUNTER, int): self.uid = _SEQUENCE_COUNTER _SEQUENCE_COUNTER += 1 else: # Doing Multiprocessing.Value += x is not process-safe. with _SEQUENCE_COUNTER.get_lock(): self.uid = _SEQUENCE_COUNTER.value _SEQUENCE_COUNTER.value += 1 self.workers = 0 self.executor_fn = None self.queue = None self.run_thread = None self.stop_signal = None def is_running(self): return self.stop_signal is not None and not self.stop_signal.is_set() def start(self, workers=1, max_queue_size=10): """Start the handler's workers. # Arguments workers: number of worker threads max_queue_size: queue size (when full, workers could block on `put()`) """ if self.use_multiprocessing: self.executor_fn = self._get_executor_init(workers) else: # We do not need the init since it's threads. 
self.executor_fn = lambda _: ThreadPool(workers) self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start() def _send_sequence(self): """Send current Iterable to all workers.""" # For new processes that may spawn _SHARED_SEQUENCES[self.uid] = self.sequence def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. # Arguments timeout: maximum time to wait on `thread.join()` """ self.stop_signal.set() with self.queue.mutex: self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() self.run_thread.join(timeout) _SHARED_SEQUENCES[self.uid] = None @abstractmethod def _run(self): """Submits request to the executor and queue the `Future` objects.""" raise NotImplementedError @abstractmethod def _get_executor_init(self, workers): """Get the Pool initializer for multiprocessing. # Returns Function, a Function to initialize the pool """ raise NotImplementedError @abstractmethod def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Returns Generator yielding tuples `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ raise NotImplementedError class OrderedEnqueuer(SequenceEnqueuer): """Builds a Enqueuer from a Sequence. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments sequence: A `aadeeplearning.utils.data_utils.Sequence` object. use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch """ def __init__(self, sequence, use_multiprocessing=False, shuffle=False): super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing) self.shuffle = shuffle def _get_executor_init(self, workers): """Get the Pool initializer for multiprocessing. # Returns Function, a Function to initialize the pool """ return lambda seqs: mp.Pool(workers, initializer=init_pool, initargs=(seqs,)) def _wait_queue(self): """Wait for the queue to be empty.""" while True: time.sleep(0.1) if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set(): return def _run(self): """Submits request to the executor and queue the `Future` objects.""" sequence = list(range(len(self.sequence))) self._send_sequence() # Share the initial sequence while True: if self.shuffle: random.shuffle(sequence) with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: for i in sequence: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(get_index, (self.uid, i)), block=True) # Done with the current epoch, waiting for the final batches self._wait_queue() if self.stop_signal.is_set(): # We're done return # Call the internal on epoch end. self.sequence.on_epoch_end() self._send_sequence() # Update the pool def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Yields The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. 
""" try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except Exception as e: self.stop() six.reraise(*sys.exc_info()) def init_pool_generator(gens, random_seed=None): global _SHARED_SEQUENCES _SHARED_SEQUENCES = gens if random_seed is not None: ident = mp.current_process().ident np.random.seed(random_seed + ident) def next_sample(uid): """Get the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. # Arguments uid: int, generator identifier # Returns The next value of generator `uid`. """ return six.next(_SHARED_SEQUENCES[uid]) class GeneratorEnqueuer(SequenceEnqueuer): """Builds a queue out of a data generator. The provided generator can be finite in which case the class will throw a `StopIteration` exception. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each worker. """ def __init__(self, sequence, use_multiprocessing=False, wait_time=None, random_seed=None): super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing) self.random_seed = random_seed if wait_time is not None: warnings.warn('`wait_time` is not used anymore.', DeprecationWarning) def _get_executor_init(self, workers): """Get the Pool initializer for multiprocessing. # Returns Function, a Function to initialize the pool """ return lambda seqs: mp.Pool(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed)) def _run(self): """Submits request to the executor and queue the `Future` objects.""" self._send_sequence() # Share the initial generator with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: while True: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(next_sample, (self.uid,)), block=True) def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Yields The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except StopIteration: # Special case for finite generators last_ones = [] while self.queue.qsize() > 0: last_ones.append(self.queue.get(block=True)) # Wait for them to complete list(map(lambda f: f.wait(), last_ones)) # Keep the good ones last_ones = [future.get() for future in last_ones if future.successful()] for inputs in last_ones: if inputs is not None: yield inputs except Exception as e: self.stop() if 'generator already executing' in str(e): raise RuntimeError( "Your generator is NOT thread-safe." "AADeepLearning requires a thread-safe generator when" "`use_multiprocessing=False, workers > 1`." "For more information see issue #1638.") six.reraise(*sys.exc_info())
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/data_utils.py
data_utils.py
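Besides `get_file`, which every dataset loader in this package calls for downloading and caching, the module above provides the `Sequence`/`Enqueuer` machinery for parallel batch loading. A minimal sketch, assuming an in-memory toy dataset and the import path from the record above:

```python
import numpy as np
from AADeepLearning.datasets.data_utils import Sequence, OrderedEnqueuer  # path from the record above

class ArraySequence(Sequence):
    """Toy Sequence serving fixed-size batches from in-memory arrays."""
    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[s], self.y[s]

seq = ArraySequence(np.arange(100).reshape(50, 2), np.arange(50), batch_size=8)

enqueuer = OrderedEnqueuer(seq, use_multiprocessing=False, shuffle=False)
enqueuer.start(workers=2, max_queue_size=4)
gen = enqueuer.get()
for _ in range(len(seq)):            # one pass over the data
    batch_x, batch_y = next(gen)
    print(batch_x.shape, batch_y.shape)
enqueuer.stop()
```

Because each batch index is submitted exactly once per epoch, `OrderedEnqueuer` guarantees every sample is seen once per pass, which is the safety property the `Sequence` docstring above highlights over plain generators.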
"""Fashion-MNIST dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os from .data_utils import get_file import numpy as np def load_data(): """Loads the Fashion-MNIST dataset. # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ dirname = os.path.join('datasets', 'fashion-mnist') base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'] paths = [] for fname in files: paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname)) with gzip.open(paths[0], 'rb') as lbpath: y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[1], 'rb') as imgpath: x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) with gzip.open(paths[2], 'rb') as lbpath: y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[3], 'rb') as imgpath: x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28) return (x_train, y_train), (x_test, y_test)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/fashion_mnist.py
fashion_mnist.py
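A quick usage sketch, assuming the import path from the record above; the shapes follow directly from the gzip parsing in `load_data`, and the reshape at the end is only an illustration of typical channels-first preprocessing.

```python
from AADeepLearning.datasets import fashion_mnist  # import path assumed from the record above

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape, y_train.shape)   # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)     # (10000, 28, 28) (10000,)

# Typical preprocessing: scale to [0, 1] and add an explicit channel axis.
x_train = (x_train.astype('float32') / 255.0).reshape(-1, 1, 28, 28)
```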
"""IMDB sentiment classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .data_utils import get_file # from ..preprocessing.sequence import _remove_long_seq import numpy as np import json import warnings def load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3, **kwargs): """Loads the IMDB dataset. # Arguments path: where to cache the data (relative to `~/.aadeeplearning/dataset`). num_words: max number of words to include. Words are ranked by how often they occur (in the training set) and only the most frequent words are kept skip_top: skip the top N most frequently occurring words (which may not be informative). maxlen: sequences longer than this will be filtered out. seed: random seed for sample shuffling. start_char: The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. oov_char: words that were cut out because of the `num_words` or `skip_top` limit will be replaced with this character. index_from: index actual words with this index and higher. # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. # Raises ValueError: in case `maxlen` is so low that no input sequence could be kept. Note that the 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped. """ # Legacy support if 'nb_words' in kwargs: warnings.warn('The `nb_words` argument in `load_data` ' 'has been renamed `num_words`.') num_words = kwargs.pop('nb_words') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) path = get_file(path, origin='https://s3.amazonaws.com/text-datasets/imdb.npz', file_hash='599dadb1135973df5b59232a0e9a887c') with np.load(path) as f: x_train, labels_train = f['x_train'], f['y_train'] x_test, labels_test = f['x_test'], f['y_test'] np.random.seed(seed) indices = np.arange(len(x_train)) np.random.shuffle(indices) x_train = x_train[indices] labels_train = labels_train[indices] indices = np.arange(len(x_test)) np.random.shuffle(indices) x_test = x_test[indices] labels_test = labels_test[indices] xs = np.concatenate([x_train, x_test]) labels = np.concatenate([labels_train, labels_test]) if start_char is not None: xs = [[start_char] + [w + index_from for w in x] for x in xs] elif index_from: xs = [[w + index_from for w in x] for x in xs] # if maxlen: # xs, labels = _remove_long_seq(maxlen, xs, labels) # if not xs: # raise ValueError('After filtering for sequences shorter than maxlen=' + # str(maxlen) + ', no sequence was kept. ' # 'Increase maxlen.') if not num_words: num_words = max([max(x) for x in xs]) # by convention, use 2 as OOV word # reserve 'index_from' (=3 by default) characters: # 0 (padding), 1 (start), 2 (OOV) if oov_char is not None: xs = [[w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs] else: xs = [[w for w in x if skip_top <= w < num_words] for x in xs] idx = len(x_train) x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx]) x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:]) return (x_train, y_train), (x_test, y_test) def get_word_index(path='imdb_word_index.json'): """Retrieves the dictionary mapping words to word indices. 
# Arguments path: where to cache the data (relative to `~/.aadeeplearning/dataset`). # Returns The word index dictionary. """ path = get_file( path, origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json', file_hash='bfafd718b763782e994055a2d397834f') with open(path) as f: return json.load(f)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/imdb.py
imdb.py
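A usage sketch for the IMDB loader; since the sequence-padding preprocessor import is commented out above, a hand-rolled padding helper (purely illustrative, not part of the package) is shown here. Import path assumed from the record above.

```python
import numpy as np
from AADeepLearning.datasets import imdb  # import path assumed from the record above

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=5000, skip_top=10)

# Reviews come back as variable-length lists of word indices, so a fixed-length
# representation has to be built by hand for dense models.
def pad(seqs, maxlen=200, value=0):
    out = np.full((len(seqs), maxlen), value, dtype='int32')
    for i, s in enumerate(seqs):
        trunc = s[-maxlen:]              # keep the end of long reviews
        out[i, :len(trunc)] = trunc
    return out

x_train_pad = pad(x_train)
print(x_train_pad.shape, y_train[:5])    # (25000, 200) and 0/1 sentiment labels
```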
"""CIFAR10 small images classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .cifar import load_batch from .data_utils import get_file # from .. import backend as K import numpy as np import os def load_data(): """Loads CIFAR10 dataset. # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ dirname = 'cifar-10-batches-py' origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' path = get_file(dirname, origin=origin, untar=True) num_train_samples = 50000 x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') y_train = np.empty((num_train_samples,), dtype='uint8') for i in range(1, 6): fpath = os.path.join(path, 'data_batch_' + str(i)) (x_train[(i - 1) * 10000: i * 10000, :, :, :], y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath) fpath = os.path.join(path, 'test_batch') x_test, y_test = load_batch(fpath) y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) # if K.image_data_format() == 'channels_last': # x_train = x_train.transpose(0, 2, 3, 1) # x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/cifar10.py
cifar10.py
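A usage sketch, assuming the import path from the record above; note that the channels-last transpose is left commented out in the loader, so the arrays come back channels-first with column label vectors.

```python
from AADeepLearning.datasets import cifar10  # import path assumed from the record above

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train.shape, y_train.shape)   # (50000, 3, 32, 32) (50000, 1)
print(x_test.shape, y_test.shape)     # (10000, 3, 32, 32) (10000, 1)

# If a channels-last layout is needed, transpose manually:
x_train_cl = x_train.transpose(0, 2, 3, 1)   # (50000, 32, 32, 3)
```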
"""Python utilities required by AADeepLearning.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import binascii import numpy as np import time import sys import six import marshal import types as python_types import inspect import codecs import collections _GLOBAL_CUSTOM_OBJECTS = {} class CustomObjectScope(object): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. # Example Consider a custom object `MyObject` (e.g. a class): ```python with CustomObjectScope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` """ def __init__(self, *args): self.custom_objects = args self.backup = None def __enter__(self): self.backup = _GLOBAL_CUSTOM_OBJECTS.copy() for objects in self.custom_objects: _GLOBAL_CUSTOM_OBJECTS.update(objects) return self def __exit__(self, *args, **kwargs): _GLOBAL_CUSTOM_OBJECTS.clear() _GLOBAL_CUSTOM_OBJECTS.update(self.backup) def custom_object_scope(*args): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Convenience wrapper for `CustomObjectScope`. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. # Example Consider a custom object `MyObject` ```python with custom_object_scope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` # Arguments *args: Variable length list of dictionaries of name, class pairs to add to custom objects. # Returns Object of type `CustomObjectScope`. """ return CustomObjectScope(*args) def get_custom_objects(): """Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access `_GLOBAL_CUSTOM_OBJECTS`. # Example ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` # Returns Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`). """ return _GLOBAL_CUSTOM_OBJECTS def serialize_aadeeplearning_object(instance): if instance is None: return None if hasattr(instance, 'get_config'): return { 'class_name': instance.__class__.__name__, 'config': instance.get_config() } if hasattr(instance, '__name__'): return instance.__name__ else: raise ValueError('Cannot serialize', instance) def deserialize_aadeeplearning_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): if isinstance(identifier, dict): # In this case we are dealing with a AADeepLearning config dictionary. 
config = identifier if 'class_name' not in config or 'config' not in config: raise ValueError('Improper config format: ' + str(config)) class_name = config['class_name'] if custom_objects and class_name in custom_objects: cls = custom_objects[class_name] elif class_name in _GLOBAL_CUSTOM_OBJECTS: cls = _GLOBAL_CUSTOM_OBJECTS[class_name] else: module_objects = module_objects or {} cls = module_objects.get(class_name) if cls is None: raise ValueError('Unknown ' + printable_module_name + ': ' + class_name) if hasattr(cls, 'from_config'): custom_objects = custom_objects or {} if has_arg(cls.from_config, 'custom_objects'): return cls.from_config( config['config'], custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))) with CustomObjectScope(custom_objects): return cls.from_config(config['config']) else: # Then `cls` may be a function returning a class. # in this case by convention `config` holds # the kwargs of the function. custom_objects = custom_objects or {} with CustomObjectScope(custom_objects): return cls(**config['config']) elif isinstance(identifier, six.string_types): function_name = identifier if custom_objects and function_name in custom_objects: fn = custom_objects.get(function_name) elif function_name in _GLOBAL_CUSTOM_OBJECTS: fn = _GLOBAL_CUSTOM_OBJECTS[function_name] else: fn = module_objects.get(function_name) if fn is None: raise ValueError('Unknown ' + printable_module_name + ':' + function_name) return fn else: raise ValueError('Could not interpret serialized ' + printable_module_name + ': ' + identifier) def func_dump(func): """Serializes a user defined function. # Arguments func: the function to serialize. # Returns A tuple `(code, defaults, closure)`. """ raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, 'base64').decode('ascii') defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. # Arguments code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. # Returns A function object. """ if isinstance(code, (tuple, list)): # unpack previous dump code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) def ensure_value_to_cell(value): """Ensures that a value is converted to a python cell object. # Arguments value: Any value that needs to be casted to the cell type # Returns A value wrapped as a cell object (see function "func_load") """ def dummy_fn(): value # just access it so it gets captured in .__closure__ cell_value = dummy_fn.__closure__[0] if not isinstance(value, type(cell_value)): return cell_value else: return value if closure is not None: closure = tuple(ensure_value_to_cell(_) for _ in closure) try: raw_code = codecs.decode(code.encode('ascii'), 'base64') code = marshal.loads(raw_code) except (UnicodeEncodeError, binascii.Error, ValueError): # backwards compatibility for models serialized prior to 2.1.2 raw_code = code.encode('raw_unicode_escape') code = marshal.loads(raw_code) if globs is None: globs = globals() return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure) def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. For Python 2, checks if there is an argument with the given name. 
For Python 3, checks if there is an argument with the given name, and also whether this argument can be called with a keyword (i.e. if it is not a positional-only argument). # Arguments fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the function accepts a `**kwargs` argument. # Returns bool, whether `fn` accepts a `name` keyword argument. """ if sys.version_info < (3,): arg_spec = inspect.getargspec(fn) if accept_all and arg_spec.keywords is not None: return True return (name in arg_spec.args) elif sys.version_info < (3, 3): arg_spec = inspect.getfullargspec(fn) if accept_all and arg_spec.varkw is not None: return True return (name in arg_spec.args or name in arg_spec.kwonlyargs) else: signature = inspect.signature(fn) parameter = signature.parameters.get(name) if parameter is None: if accept_all: for param in signature.parameters.values(): if param.kind == inspect.Parameter.VAR_KEYWORD: return True return False return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)) class Progbar(object): """Displays a progress bar. # Arguments target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) stateful_metrics: Iterable of string names of metrics that should *not* be averaged over time. Metrics in this list will be displayed as-is. All others will be averaged by the progbar before display. interval: Minimum visual progress update interval (in seconds). """ def __init__(self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None): self.target = target self.width = width self.verbose = verbose self.interval = interval if stateful_metrics: self.stateful_metrics = set(stateful_metrics) else: self.stateful_metrics = set() self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or 'ipykernel' in sys.modules) self._total_width = 0 self._seen_so_far = 0 self._values = collections.OrderedDict() self._start = time.time() self._last_update = 0 def update(self, current, values=None): """Updates the progress bar. # Arguments current: Index of current step. values: List of tuples: `(name, value_for_last_step)`. If `name` is in `stateful_metrics`, `value_for_last_step` will be displayed as-is. Else, an average of the metric over time will be displayed. """ values = values or [] for k, v in values: if k not in self.stateful_metrics: if k not in self._values: self._values[k] = [v * (current - self._seen_so_far), current - self._seen_so_far] else: self._values[k][0] += v * (current - self._seen_so_far) self._values[k][1] += (current - self._seen_so_far) else: # Stateful metrics output a numeric value. This representation # means "take an average from a single value" but keeps the # numeric formatting. 
self._values[k] = [v, 1] self._seen_so_far = current now = time.time() info = ' - %.0fs' % (now - self._start) if self.verbose == 1: if (now - self._last_update < self.interval and self.target is not None and current < self.target): return prev_total_width = self._total_width if self._dynamic_display: sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') else: sys.stdout.write('\n') if self.target is not None: numdigits = int(np.floor(np.log10(self.target))) + 1 barstr = '%%%dd/%d [' % (numdigits, self.target) bar = barstr % current prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' * (self.width - prog_width)) bar += ']' else: bar = '%7d/Unknown' % current self._total_width = len(bar) sys.stdout.write(bar) if current: time_per_unit = (now - self._start) / current else: time_per_unit = 0 if self.target is not None and current < self.target: eta = time_per_unit * (self.target - current) if eta > 3600: eta_format = ('%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta info = ' - ETA: %s' % eta_format else: if time_per_unit >= 1: info += ' %.0fs/step' % time_per_unit elif time_per_unit >= 1e-3: info += ' %.0fms/step' % (time_per_unit * 1e3) else: info += ' %.0fus/step' % (time_per_unit * 1e6) for k in self._values: info += ' - %s:' % k if isinstance(self._values[k], list): avg = np.mean( self._values[k][0] / max(1, self._values[k][1])) if abs(avg) > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg else: info += ' %s' % self._values[k] self._total_width += len(info) if prev_total_width > self._total_width: info += (' ' * (prev_total_width - self._total_width)) if self.target is not None and current >= self.target: info += '\n' sys.stdout.write(info) sys.stdout.flush() elif self.verbose == 2: if self.target is None or current >= self.target: for k in self._values: info += ' - %s:' % k avg = np.mean( self._values[k][0] / max(1, self._values[k][1])) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg info += '\n' sys.stdout.write(info) sys.stdout.flush() self._last_update = now def add(self, n, values=None): self.update(self._seen_so_far + n, values) def to_list(x, allow_tuple=False): """Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. # Arguments x: target object to be normalized. allow_tuple: If False and x is a tuple, it will be converted into a list with a single element (the tuple). Else converts the tuple to a list. # Returns A list. """ if isinstance(x, list): return x if allow_tuple and isinstance(x, tuple): return list(x) return [x] def unpack_singleton(x): """Gets the first element if the iterable has only one value. Otherwise return the iterable. # Argument: x: A list or tuple. # Returns: The same iterable or the first element. """ if len(x) == 1: return x[0] return x def object_list_uid(object_list): object_list = to_list(object_list) return ', '.join([str(abs(id(x))) for x in object_list]) def is_all_none(iterable_or_element): iterable = to_list(iterable_or_element, allow_tuple=True) for element in iterable: if element is not None: return False return True def slice_arrays(arrays, start=None, stop=None): """Slices an array or list of arrays. 
This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if `arrays` is an array-like - [x[start:stop] for x in arrays] if `arrays` is a list Can also work on list/array of indices: `_slice_arrays(x, indices)` # Arguments arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. # Returns A slice of the array(s). """ if arrays is None: return [None] elif isinstance(arrays, list): if hasattr(start, '__len__'): # hdf5 datasets only support list objects as indices if hasattr(start, 'shape'): start = start.tolist() return [None if x is None else x[start] for x in arrays] else: return [None if x is None else x[start:stop] for x in arrays] else: if hasattr(start, '__len__'): if hasattr(start, 'shape'): start = start.tolist() return arrays[start] elif hasattr(start, '__getitem__'): return arrays[start:stop] else: return [None] def transpose_shape(shape, target_format, spatial_axes): """Converts a tuple or a list to the correct `data_format`. It does so by switching the positions of its elements. # Arguments shape: Tuple or list, often representing shape, corresponding to `'channels_last'`. target_format: A string, either `'channels_first'` or `'channels_last'`. spatial_axes: A tuple of integers. Correspond to the indexes of the spatial axes. For example, if you pass a shape representing (batch_size, timesteps, rows, cols, channels), then `spatial_axes=(2, 3)`. # Returns A tuple or list, with the elements permuted according to `target_format`. # Example ```python >>> from aadeeplearning.utils.generic_utils import transpose_shape >>> transpose_shape((16, 128, 128, 32),'channels_first', spatial_axes=(1, 2)) (16, 32, 128, 128) >>> transpose_shape((16, 128, 128, 32), 'channels_last', spatial_axes=(1, 2)) (16, 128, 128, 32) >>> transpose_shape((128, 128, 32), 'channels_first', spatial_axes=(0, 1)) (32, 128, 128) ``` # Raises ValueError: if `value` or the global `data_format` invalid. """ if target_format == 'channels_first': new_values = shape[:spatial_axes[0]] new_values += (shape[-1],) new_values += tuple(shape[x] for x in spatial_axes) if isinstance(shape, list): return list(new_values) return new_values elif target_format == 'channels_last': return shape else: raise ValueError('The `data_format` argument must be one of ' '"channels_first", "channels_last". Received: ' + str(target_format))
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/generic_utils.py
generic_utils.py
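A small sketch of the `Progbar` utility defined above, assuming the import path from the record; `loss` is averaged over steps while `lr` is declared a stateful metric and displayed as-is.

```python
import time
from AADeepLearning.datasets.generic_utils import Progbar  # path from the record above

# Track a 20-step loop, averaging 'loss' over time but showing 'lr' unchanged.
bar = Progbar(target=20, stateful_metrics=['lr'])
for step in range(1, 21):
    time.sleep(0.05)                  # stand-in for real work
    bar.update(step, values=[('loss', 1.0 / step), ('lr', 0.001)])
```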
""" MNIST handwritten digits dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .data_utils import get_file import numpy as np def load_data(path='mnist.npz'): """Loads the MNIST dataset. # Arguments path: path where to cache the dataset locally (relative to ~/.aadeeplearning/datasets). # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ path = get_file(path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz', file_hash='8a61469f7ea1b51cbae51d4f78837e45') f = np.load(path) x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] f.close() return (x_train, y_train), (x_test, y_test)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/mnist.py
mnist.py
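A usage sketch, assuming the import path from the record above; the one-hot helper is illustrative and not part of the package.

```python
import numpy as np
from AADeepLearning.datasets import mnist  # import path assumed from the record above

(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)       # (60000, 28, 28) (60000,)

# One-hot encode the integer labels with NumPy (illustrative helper only).
y_train_onehot = np.eye(10, dtype='float32')[y_train]
print(y_train_onehot.shape)               # (60000, 10)
```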
"""CIFAR100 small images classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .cifar import load_batch from .data_utils import get_file # from .. import backend as K import numpy as np import os def load_data(label_mode='fine'): """Loads CIFAR100 dataset. # Arguments label_mode: one of "fine", "coarse". # Returns Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. # Raises ValueError: in case of invalid `label_mode`. """ if label_mode not in ['fine', 'coarse']: raise ValueError('`label_mode` must be one of `"fine"`, `"coarse"`.') dirname = 'cifar-100-python' origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' path = get_file(dirname, origin=origin, untar=True) fpath = os.path.join(path, 'train') x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels') fpath = os.path.join(path, 'test') x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels') y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) # if K.image_data_format() == 'channels_last': # x_train = x_train.transpose(0, 2, 3, 1) # x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test)
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/cifar100.py
cifar100.py
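A usage sketch showing the two `label_mode` options, assuming the import path from the record above; the second call reuses the cached download.

```python
from AADeepLearning.datasets import cifar100  # import path assumed from the record above

# 'fine' gives the 100 detailed classes, 'coarse' the 20 superclasses.
(x_train, y_train_fine), _ = cifar100.load_data(label_mode='fine')
(_, y_train_coarse), _ = cifar100.load_data(label_mode='coarse')

print(x_train.shape)                              # (50000, 3, 32, 32), channels-first
print(y_train_fine.max(), y_train_coarse.max())   # 99 and 19
```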
# -*- coding: utf-8 -*- """Utilities common to CIFAR10 and CIFAR100 datasets. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from six.moves import cPickle def load_batch(fpath, label_key='labels'): """Internal utility for parsing CIFAR data. # Arguments fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. # Returns A tuple `(data, labels)`. """ with open(fpath, 'rb') as f: if sys.version_info < (3,): d = cPickle.load(f) else: d = cPickle.load(f, encoding='bytes') # decode utf8 d_decoded = {} for k, v in d.items(): d_decoded[k.decode('utf8')] = v d = d_decoded data = d['data'] labels = d[label_key] data = data.reshape(data.shape[0], 3, 32, 32) return data, labels
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/cifar.py
cifar.py
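`load_batch` is an internal helper, but it can be exercised directly by mirroring what `cifar10.load_data` does for a single training batch; the paths and URL are taken from the records above, and this is a sketch rather than an officially supported entry point.

```python
import os
from AADeepLearning.datasets.cifar import load_batch        # paths from the records above
from AADeepLearning.datasets.data_utils import get_file

# Download and extract the CIFAR-10 archive, then parse one pickled batch file.
path = get_file('cifar-10-batches-py',
                origin='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
                untar=True)
data, labels = load_batch(os.path.join(path, 'data_batch_1'))
print(data.shape, len(labels))            # (10000, 3, 32, 32) 10000
```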
from __future__ import absolute_import from . import mnist from . import np_utils
AAdeepLearning
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/__init__.py
__init__.py