| text (string, 4–288k chars) | id (string, 17–110 chars) | metadata (dict) | __index_level_0__ (int64, 0–47) |
---|---|---|---|
@echo off
if defined _OLD_VIRTUAL_PROMPT (
set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
)
set _OLD_VIRTUAL_PROMPT=
if defined _OLD_VIRTUAL_PYTHONHOME (
set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
set _OLD_VIRTUAL_PYTHONHOME=
)
if defined _OLD_VIRTUAL_PATH (
set "PATH=%_OLD_VIRTUAL_PATH%"
)
set _OLD_VIRTUAL_PATH=
set VIRTUAL_ENV=
:END
| Django-locallibrary/env/Scripts/deactivate.bat/0 | {
"file_path": "Django-locallibrary/env/Scripts/deactivate.bat",
"repo_id": "Django-locallibrary",
"token_count": 178
} | 33 |
//hire me on your next big Machine Learning Project on this link > https://www.mql5.com/en/job/new?prefered=omegajoctan
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//don't forget to change this directory
#include "C:\Users\Omega Joctan\AppData\Roaming\MetaQuotes\Terminal\892B47EBC091D6EF95E3961284A76097\MQL5\Experts\DataScience\LinearRegression\LinearRegressionLib.mqh";
//---
class CLogisticRegression: public CMatrixRegression
{
private:
double e; //Euler's number
string m_AllDataArrayString[];
double m_AllDataArrayDouble[];
string ColumnsToEncode[];
string ColumnsToFix[];
string MembersToEncode[];
double m_yintercept; //constant for all values of x
int each_arrsize;
protected:
int single_rowtotal;
void FixMissingValuesFunction(double& Arr[],int column_number,int index);
void EncodeLabelFunction(string& Arr[],double& output_arr[], int column_number, int index);
void WriteToCSV(int &Predicted[],string &date_time[],string file_name,string delimiter=",");
public:
CLogisticRegression(void);
~CLogisticRegression(void);
//These should be called before the Init
void FixMissingValues(string columns);
void LabelEncoder(string columns, string members);
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
void Init(string filename=NULL,string delimiter=",",int y_column=1,string x_columns="3,4,5,6,7,8",double train_size_split = 0.7,bool isdebug=true);
double LogisticRegressionMain(double& accuracy);
void ConfusionMatrix(double &y[], int &Predicted_y[], double& accuracy,bool print=true);
};
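//+------------------------------------------------------------------+
//| Hedged usage sketch (not part of the original library): it only   |
//| illustrates the call order implied by the comments in the class   |
//| above -- FixMissingValues and LabelEncoder before Init. The csv   |
//| file name and the column numbers are assumptions made purely for  |
//| illustration.                                                     |
//+------------------------------------------------------------------+
void LogisticRegression_UsageSketch(void)
 {
   CLogisticRegression lr;
   lr.FixMissingValues("3,4");               //columns whose zero values get replaced by the mean
   lr.LabelEncoder("5","male,female");       //encode the string members of column 5 to 0 and 1
   lr.Init("dataset.csv",",",1,"3,4,5",0.7); //hypothetical csv file and column layout
   double accuracy = 0;
   lr.LogisticRegressionMain(accuracy);
   Print("accuracy = ",accuracy);
 }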
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CLogisticRegression::CLogisticRegression(void)
{
e = 2.718281828;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CLogisticRegression::~CLogisticRegression(void)
{
FileClose(m_handle);
ArrayFree(m_AllDataArrayDouble);
ArrayFree(ColumnsToEncode);
ArrayFree(ColumnsToFix);
ArrayFree(MembersToEncode);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CLogisticRegression::FixMissingValues(string columns)
{
ushort separator = StringGetCharacter(",",0);
StringSplit(columns,separator,ColumnsToFix); //Store the columns numbers to this Array
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CLogisticRegression::LabelEncoder(string columns,string members)
{
ushort separator = StringGetCharacter(",",0);
StringSplit(columns,separator,ColumnsToEncode); //Store the columns numbers to this Array
separator = StringGetCharacter(",",0);
StringSplit(members,separator,MembersToEncode); //Store the member names to this Array
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CLogisticRegression::FixMissingValuesFunction(double& Arr[],int column_number, int index)
{
// the index argument is for getting the name of the column
for (int i=0; i<ArraySize(ColumnsToFix); i++)
if ((int)ColumnsToFix[i]!=column_number) continue; //the column to fix has to match the column on the input
else
{
Print("FixMissingValues Function triggered ","\n starting to Fix Missing Values in ",DataColumnNames[index]!=""?DataColumnNames[index]: (string)column_number," Column ");
//from the function >>> void CLogisticRegression::FixMissingValues(double &Arr[])
int counter=0; double mean=0, total=0;
for (int j=0; j<ArraySize(Arr); j++) //first step is to find the mean of the non zero values
if (Arr[j]!=0)
{
counter++;
total += Arr[j];
}
mean = total/counter; //all the values divided by their total number
if (m_debug)
{
Print("mean ",MathRound(mean)," before Arr");
ArrayPrint(Arr);
}
for (int j=0; j<ArraySize(Arr); j++)
{
if (Arr[j]==0)
{
Arr[j] = MathRound(mean); //replace zero values in array
}
}
if (m_debug)
{
Print("After Arr");
ArrayPrint(Arr);
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CLogisticRegression::EncodeLabelFunction(string& Arr[], double& output_arr[], int column_number,int index) //since the input column contains strings, Arr[] has to be a string array
{
// the index argument is for getting the name of the column
double EncodeTo[];
ArrayResize(EncodeTo,ArraySize(Arr)); //Make the Encode To Array Same size as source
for (int i=0; i<ArraySize(ColumnsToEncode); i++)
if ((int)ColumnsToEncode[i]!=column_number) continue; //the column to encode has to match the column on the input
else
{
Print("Encode Label Function triggered ","\n starting to Encode Values in ",DataColumnNames[index]!="" ? DataColumnNames[index]: (string)column_number," Column ");
//from >>>> void CLogisticRegression::LabelEncoder(string &src[],int &EncodeTo[],string members="male,female")
int binary=0;
for(int x=0; x<ArraySize(MembersToEncode); x++) // loop the members array
{
string val = MembersToEncode[x];
binary = x; //binary to assign to a member
int label_counter = 0;
for (int y=0; y<ArraySize(Arr); y++) //source array
{
string source_val = Arr[y];
if (val == source_val)
{
EncodeTo[y] = binary;
label_counter++;
}
}
Print(MembersToEncode[binary]," total =",label_counter," Encoded To = ",binary);
ArrayCopy(output_arr,EncodeTo);
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CLogisticRegression::Init(string filename=NULL,string delimiter=",",int y_column=1,string x_columns="3,4,5,6,7,8",double train_size_split = 0.7,bool isdebug=true)
{
//---
LrInit(y_column,x_columns,filename,delimiter,train_size_split,false);
LinearRegMain();
//---
single_rowtotal = rows_total/x_columns_chosen;
}
//+------------------------------------------------------------------+
//|        The Main Logistic Regression Function for scripts         |
//+------------------------------------------------------------------+
double CLogisticRegression::LogisticRegressionMain(double &accuracy)
{
each_arrsize = (int) MathFloor(single_rowtotal*(1-m_train_split));
int TestPredicted[];
ArrayResize(TestPredicted,each_arrsize);
for (int i=0; i<each_arrsize; i++)
{
double sigmoid = 1/(1+MathPow(e,-TestYDataSet[i]));
TestPredicted[i] = (int) round(sigmoid); //round the values to give us the actual 0 or 1
}
//---
ConfusionMatrix(TestYDataSet,TestPredicted,accuracy);
string dates[];
GetColumnDatatoArray(1,dates);
string Temp_dates[];
ArrayResize(Temp_dates,each_arrsize);
int counter = 0;
for (int i=0; i<ArraySize(dates); i++)
if (i >= (int) MathCeil(single_rowtotal*m_train_split))
Temp_dates[counter++] = dates[i];
//---
ArrayFree(dates);
ArrayCopy(dates,Temp_dates);
ArrayFree(Temp_dates);
//---
return (accuracy);
}
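//+------------------------------------------------------------------+
//| Worked example of the sigmoid rounding above (values assumed for  |
//| illustration): for TestYDataSet[i] = 2.0 the sigmoid is           |
//| 1/(1+e^-2) ~ 0.88 which rounds to class 1, while for -1.5 it is   |
//| 1/(1+e^1.5) ~ 0.18 which rounds to class 0.                       |
//+------------------------------------------------------------------+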
//+------------------------------------------------------------------+
//|           Confusion Matrix and accuracy calculation              |
//+------------------------------------------------------------------+
void CLogisticRegression::ConfusionMatrix(double &y[], int &Predicted_y[], double& accuracy,bool print=true)
{
int TP=0, TN=0, FP=0, FN=0;
for (int i=0; i<ArraySize(y); i++)
{
if ((int)y[i]==Predicted_y[i] && Predicted_y[i]==1)
TP++;
if ((int)y[i]==Predicted_y[i] && Predicted_y[i]==0)
TN++;
if (Predicted_y[i]==1 && (int)y[i]==0)
FP++;
if (Predicted_y[i]==0 && (int)y[i]==1)
FN++;
}
if (print)
Print("Confusion Matrix \n ","[ ",TN," ",FP," ]","\n"," [ ",FN," ",TP," ] ");
accuracy = (double)(TN+TP) / (double)(TP+TN+FP+FN);
}
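//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): with            |
//| y = {1,0,1,1,0} and Predicted_y = {1,0,0,1,1} the loop above      |
//| counts TP=2, TN=1, FP=1, FN=1, the printed matrix is              |
//| [ 1 1 ] / [ 1 2 ] and accuracy = (1+2)/(2+1+1+1) = 0.6            |
//+------------------------------------------------------------------+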
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#define VAR_NAME(v) ""+#v+""
void CLogisticRegression::WriteToCSV(int &Predicted[],string &date_time[],string file_name,string delimiter=",")
{
FileDelete(file_name);
int handle = FileOpen(file_name,FILE_WRITE|FILE_READ|FILE_CSV|FILE_ANSI,delimiter);
if (handle == INVALID_HANDLE)
Print(__FUNCTION__," Invalid csv handle err=",GetLastError());
//---
if (handle>0)
{
FileWrite(handle,VAR_NAME(Predicted),VAR_NAME(date_time));
for (int i=0; i<ArraySize(Predicted); i++)
{
string str1 = IntegerToString(Predicted[i]),
str2 = date_time[i];
FileWrite(handle,str1,str2);
}
}
FileClose(handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| LogisticRegression-MQL5-and-python/LogisticRegression mql5/LogisticRegressionLib.mqh/0 | {
"file_path": "LogisticRegression-MQL5-and-python/LogisticRegression mql5/LogisticRegressionLib.mqh",
"repo_id": "LogisticRegression-MQL5-and-python",
"token_count": 5886
} | 34 |
//+------------------------------------------------------------------+
//| CTruncatedSVD.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include "base.mqh"
#include <MALE5\MqPlotLib\plots.mqh>
class CTruncatedSVD
{
CPlots plt;
uint m_components;
ulong n_features;
matrix components_;
vector mean;
vector explained_variance_;
public:
CTruncatedSVD(uint k=0);
~CTruncatedSVD(void);
matrix fit_transform(matrix& X);
matrix transform(matrix &X);
vector transform(vector &X);
ulong _select_n_components(vector &singular_values);
};
//+------------------------------------------------------------------+
//| If the k value is left at its default value of zero, the function|
//| _select_n_components will be used to find the best number of     |
//| components to use                                                 |
//+------------------------------------------------------------------+
CTruncatedSVD::CTruncatedSVD(uint k=0)
:m_components(k)
{
}
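//+------------------------------------------------------------------+
//| Hedged usage sketch (not part of the original library): shows how |
//| leaving k at its default of zero lets fit_transform auto-select   |
//| the number of components. The matrix values are assumptions made  |
//| purely for illustration.                                          |
//+------------------------------------------------------------------+
void TruncatedSVD_UsageSketch(void)
 {
   matrix X = {{1.0, 2.0, 3.1},
               {2.0, 4.1, 6.0},
               {3.1, 6.0, 9.2},
               {4.0, 8.2, 12.0}};
   CTruncatedSVD svd(0);                    //k left at the default of zero
   matrix X_reduced = svd.fit_transform(X); //number of components selected internally
   Print("Reduced data\n",X_reduced);
 }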
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTruncatedSVD::~CTruncatedSVD(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CTruncatedSVD::fit_transform(matrix &X)
{
n_features = X.Cols();
if (m_components>n_features)
{
printf("%s Number of dimensions K[%d] is supposed to be <= number of features %d",__FUNCTION__,m_components,n_features);
this.m_components = (uint)n_features;
}
//--- Center the data (subtract mean)
this.mean = X.Mean(0);
matrix X_centered = BaseDimRed::subtract(X, this.mean);
//--- Compute the covariance matrix
BaseDimRed::ReplaceNaN(X_centered);
matrix cov_matrix = X_centered.Cov(false);
//--- Perform SVD on the covariance matrix
matrix U={}, Vt={};
vector Sigma={};
BaseDimRed::ReplaceNaN(cov_matrix);
if (!cov_matrix.SVD(U,Vt,Sigma))
Print(__FUNCTION__," Line ",__LINE__," Failed to calculate SVD Err=",GetLastError());
if (m_components == 0)
{
m_components = (uint)this._select_n_components(Sigma);
Print(__FUNCTION__," Best value of K = ",m_components);
}
this.components_ = BaseDimRed::Slice(Vt, this.m_components).Transpose();
BaseDimRed::ReplaceNaN(this.components_);
if (MQLInfoInteger(MQL_DEBUG))
Print("components_T[",components_.Rows(),"X",components_.Cols(),"]\n",this.components_);
this.explained_variance_ = MathPow(BaseDimRed::Slice(Sigma, this.m_components), 2) / (X.Rows() - 1);
return X_centered.MatMul(components_);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CTruncatedSVD::transform(matrix &X)
{
matrix X_centered = BaseDimRed::subtract(X, this.mean);
if (X.Cols()!=this.n_features)
{
printf("%s Inconsistent input X matrix size, It is supposed to be of size %d same as the matrix used under fit_transform",__FUNCTION__,n_features);
this.m_components = (uint)n_features;
}
return X_centered.MatMul(components_);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CTruncatedSVD::transform(vector &X)
{
matrix INPUT_MAT = MatrixExtend::VectorToMatrix(X, X.Size());
matrix OUTPUT_MAT = transform(INPUT_MAT);
return MatrixExtend::MatrixToVector(OUTPUT_MAT);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
ulong CTruncatedSVD::_select_n_components(vector &singular_values)
{
double total_variance = MathPow(singular_values.Sum(), 2);
vector explained_variance_ratio = MathPow(singular_values, 2).CumSum() / total_variance;
if (MQLInfoInteger(MQL_DEBUG))
Print(__FUNCTION__," Explained variance ratio ",explained_variance_ratio);
vector k(explained_variance_ratio.Size());
for (uint i=0; i<k.Size(); i++)
k[i] = i+1;
plt.Plot("Explained variance plot",k,explained_variance_ratio,"variance","components","Variance");
return explained_variance_ratio.ArgMax() + 1; //Choose k for maximum explained variance
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/Dimensionality Reduction/TruncatedSVD.mqh/0 | {
"file_path": "MALE5/Dimensionality Reduction/TruncatedSVD.mqh",
"repo_id": "MALE5",
"token_count": 2413
} | 35 |
//+------------------------------------------------------------------+
//| KNN_nearest_neighbors.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
bool isdebug = true;
//+------------------------------------------------------------------+
class CKNNNearestNeighbors
{
private:
uint k;
matrix<double> Matrix;
ulong m_rows, m_cols;
vector m_target;
vector m_classesVector;
matrix m_classesMatrix;
double Euclidean_distance(const vector &v1, const vector &v2);
vector ClassVector(); //global vector of target classes
void MatrixRemoveRow(matrix &mat, ulong row);
void VectorRemoveIndex(vector &v, ulong index);
double Mse(vector &A, vector &P);
public:
CKNNNearestNeighbors(matrix<double> &Matrix_, uint k_);
CKNNNearestNeighbors(matrix<double> &Matrix_);
~CKNNNearestNeighbors(void);
int KNNAlgorithm(vector &vector_);
vector CrossValidation_LOOCV(uint initial_k = 0, uint final_k=1); //Leave One out Cross Validation (LOOCV)
matrix ConfusionMatrix(vector &A,vector &P);
float TrainTest(double train_size = 0.7); //returns accuracy of the tested dataset
};
//+------------------------------------------------------------------+
CKNNNearestNeighbors:: CKNNNearestNeighbors(matrix<double> &Matrix_, uint k_)
{
k = k_;
if(k %2 ==0)
{
k = k+1;
if (isdebug)
printf("K = %d is an even number, It will be added by One so it becomes an odd Number %d", k_, k);
}
Matrix.Copy(Matrix_);
m_rows = Matrix.Rows();
m_cols = Matrix.Cols();
m_target = Matrix.Col(m_cols-1);
m_classesVector = ClassVector();
if (isdebug)
Print("classes vector | Neighbors ", m_classesVector);
}
//+------------------------------------------------------------------+
CKNNNearestNeighbors::CKNNNearestNeighbors(matrix<double> &Matrix_)
{
Matrix.Copy(Matrix_);
k = (int)round(MathSqrt(Matrix.Rows()));
k = k%2 ==0 ? k+1 : k; //make sure the value of k is an odd number
m_rows = Matrix.Rows();
m_cols = Matrix.Cols();
m_target = Matrix.Col(m_cols-1);
m_classesVector = ClassVector();
Print("classes vector | Neighbors ", m_classesVector);
}
//+------------------------------------------------------------------+
CKNNNearestNeighbors::~CKNNNearestNeighbors(void)
{
ZeroMemory(k);
ZeroMemory(m_classesVector);
ZeroMemory(m_classesMatrix);
}
//+------------------------------------------------------------------+
int CKNNNearestNeighbors::KNNAlgorithm(vector &vector_)
{
vector vector_2 = {};
vector euc_dist;
euc_dist.Resize(m_rows);
//matrix temp_matrix = Matrix;
//temp_matrix.Resize(Matrix.Rows(), Matrix.Cols()-1); //remove the last column of independent variables
for(ulong i=0; i<m_rows; i++)
{
vector_2 = Matrix.Row(i);
vector_2.Resize(m_cols-1);
euc_dist[i] = NormalizeDouble(Euclidean_distance(vector_, vector_2), 5);
}
//---
if(isdebug)
{
matrix dbgMatrix = Matrix; //temporary debug matrix
dbgMatrix.Resize(dbgMatrix.Rows(), dbgMatrix.Cols()+1);
dbgMatrix.Col(euc_dist, dbgMatrix.Cols()-1);
//Print("Matrix w Euclidean Distance\n",dbgMatrix);
ZeroMemory(dbgMatrix);
}
//---
uint size = (uint)m_target.Size();
double tarArr[];
ArrayResize(tarArr, size);
double eucArray[];
ArrayResize(eucArray, size);
for(ulong i=0; i<size; i++) //convert the vectors to array
{
tarArr[i] = m_target[i];
eucArray[i] = euc_dist[i];
}
double track[], NN[];
ArrayCopy(track, tarArr);
int max;
for(int i=0; i<(int)m_target.Size(); i++)
{
if(ArraySize(track) > (int)k)
{
max = ArrayMaximum(eucArray);
ArrayRemove(eucArray, max, 1);
ArrayRemove(track, max, 1);
}
}
ArrayCopy(NN, eucArray);
/*
Print("NN ");
ArrayPrint(NN);
Print("Track ");
ArrayPrint(track);
*/
//--- Voting process
vector votes(m_classesVector.Size());
for(ulong i=0; i<votes.Size(); i++)
{
int count = 0;
for(ulong j=0; j<track.Size(); j++)
{
if(m_classesVector[i] == track[j])
count++;
}
votes[i] = (double)count;
if(votes.Sum() == k) //all members have voted
break;
}
if(isdebug)
Print(vector_, " belongs to class ", (int)m_classesVector[votes.ArgMax()]);
return((int)m_classesVector[votes.ArgMax()]);
}
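//+------------------------------------------------------------------+
//| Worked example of the voting above (values assumed for            |
//| illustration): with k=3 and the three nearest labels              |
//| track = {1,0,1}, class 1 receives 2 votes and class 0 receives 1  |
//| vote, so KNNAlgorithm returns class 1.                            |
//+------------------------------------------------------------------+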
//+------------------------------------------------------------------+
double CKNNNearestNeighbors:: Euclidean_distance(const vector &v1, const vector &v2)
{
double dist = 0;
if(v1.Size() != v2.Size())
Print(__FUNCTION__, " v1 and v2 not matching in size");
else
{
double c = 0;
for(ulong i=0; i<v1.Size(); i++)
c += MathPow(v1[i] - v2[i], 2);
dist = MathSqrt(c);
}
return(dist);
}
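//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): for v1 = {1,2}  |
//| and v2 = {4,6} the distance is sqrt((1-4)^2 + (2-6)^2)            |
//| = sqrt(9+16) = 5.                                                 |
//+------------------------------------------------------------------+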
//+------------------------------------------------------------------+
vector CKNNNearestNeighbors::ClassVector()
{
vector t_vectors = Matrix.Col(m_cols-1); //target variables are found on the last column in the matrix
vector temp_t = t_vectors, v = {t_vectors[0]};
for(ulong i=0, count =1; i<m_rows; i++) //counting the different neighbors
{
for(ulong j=0; j<m_rows; j++)
{
if(t_vectors[i] == temp_t[j] && temp_t[j] != -1000)
{
bool count_ready = false;
for(ulong n=0; n<v.Size(); n++)
if(t_vectors[i] == v[n])
count_ready = true;
if(!count_ready)
{
count++;
v.Resize(count);
v[count-1] = t_vectors[i];
temp_t[j] = -1000; //modify so that it can no more be counted
}
else
break;
//Print("t vectors vector ",t_vectors);
}
else
continue;
}
}
return(v);
}
//+------------------------------------------------------------------+
vector CKNNNearestNeighbors::CrossValidation_LOOCV(uint initial_k = 0, uint final_k=1)
{
uint iterations = final_k-initial_k;
vector cv(iterations);
ulong N = m_rows;
matrix OG_Matrix = Matrix; //The original matrix
ulong OG_rows = m_rows; //the original number of rows
vector OG_target = m_target;
ulong size = N-1;
m_rows = m_rows-1; //leave one row out
for (uint z = initial_k; z<final_k; z++)
{
if (iterations>1) k = z;
double sum_mse = 0;
for (ulong i=0; i<size; i++)
{
MatrixRemoveRow(Matrix,i);
m_target.Resize(m_rows);
vector P(1), A = { OG_target[i] }, v;
{
v = OG_Matrix.Row(i);
v.Resize(m_cols-1);
P[0] = KNNAlgorithm(v);
}
if (isdebug)
Print("\n Actual ",A," Predicted ",P," ",i," MSE = ",Mse(A,P),"\n");
sum_mse += Mse(A,P);
Matrix.Copy(OG_Matrix);
}
cv[z] = (float)sum_mse/size;
}
Matrix.Copy(OG_Matrix);
m_rows = OG_rows;
m_target = OG_target;
return (cv);
}
//+------------------------------------------------------------------+
void CKNNNearestNeighbors::MatrixRemoveRow(matrix &mat,ulong row)
{
matrix new_matrix(mat.Rows()-1,mat.Cols()); //one row removed
for (ulong j=0; j<mat.Cols(); j++)
for (ulong i=0, new_rows=0; i<mat.Rows(); i++)
{
if (i == row) continue;
else
{
new_matrix[new_rows][j] = mat[i][j];
new_rows++;
}
}
mat.Copy(new_matrix);
}
//+------------------------------------------------------------------+
void CKNNNearestNeighbors::VectorRemoveIndex(vector &v, ulong index)
 {
   vector new_v(v.Size()-1);
   for (ulong i=0, count = 0; i<v.Size(); i++)
      if (i != index)
        {
         new_v[count] = v[i]; //keep every element except the one at the given index
         count++;
        }
   v = new_v;
 }
//+------------------------------------------------------------------+
double CKNNNearestNeighbors::Mse(vector &A,vector &P)
{
double err = 0;
vector c;
if (A.Size() != P.Size())
Print(__FUNCTION__," Err, A and P vectors not the same in size");
else
{
ulong size = A.Size();
c.Resize(size);
c = MathPow(A - P,2);
err = (c.Sum()) / size;
}
return (err);
}
//+------------------------------------------------------------------+
matrix CKNNNearestNeighbors::ConfusionMatrix(vector &A,vector &P)
{
ulong size = m_classesVector.Size();
matrix mat_(size,size);
if (A.Size() != P.Size())
Print("Cant create confusion matrix | A and P not having the same size ");
else
{
int tn = 0,fn =0,fp =0, tp=0;
for (ulong i = 0; i<A.Size(); i++)
{
if (A[i]== P[i] && P[i]==m_classesVector[0])
tp++;
if (A[i]== P[i] && P[i]==m_classesVector[1])
tn++;
if (P[i]==m_classesVector[0] && A[i]==m_classesVector[1])
fp++;
if (P[i]==m_classesVector[1] && A[i]==m_classesVector[0])
fn++;
}
mat_[0][0] = tn; mat_[0][1] = fp;
mat_[1][0] = fn; mat_[1][1] = tp;
}
return(mat_);
}
//+------------------------------------------------------------------+
float CKNNNearestNeighbors::TrainTest(double train_size=0.700000)
{
//--- Split the matrix
matrix default_Matrix = Matrix;
int train = (int)MathCeil(m_rows*train_size),
test = (int)MathFloor(m_rows*(1-train_size));
if (isdebug) printf("Train %d test %d",train,test);
matrix TrainMatrix(train,m_cols), TestMatrix(test,m_cols);
int train_index = 0, test_index =0;
//---
for (ulong r=0; r<Matrix.Rows(); r++)
{
if ((int)r < train)
{
TrainMatrix.Row(Matrix.Row(r),train_index);
train_index++;
}
else
{
TestMatrix.Row(Matrix.Row(r),test_index);
test_index++;
}
}
if (isdebug)
Print("TrainMatrix\n",TrainMatrix,"\nTestMatrix\n",TestMatrix);
//--- Training the Algorithm
Matrix.Copy(TrainMatrix); //That's it ???
//--- Testing the Algorithm
vector TestPred(TestMatrix.Rows());
vector TargetPred = TestMatrix.Col(m_cols-1);
vector v_in = {};
for (ulong i=0; i<TestMatrix.Rows(); i++)
{
v_in = TestMatrix.Row(i);
v_in.Resize(v_in.Size()-1); //Remove independent variable
TestPred[i] = KNNAlgorithm(v_in);
}
matrix cf_m = ConfusionMatrix(TargetPred,TestPred);
vector diag = cf_m.Diag();
float acc = (float)(diag.Sum()/cf_m.Sum())*100;
Print("Confusion Matrix\n",cf_m,"\nAccuracy ------> ",acc,"%");
return(acc);
}
//+------------------------------------------------------------------+ | MALE5/Neighbors/KNN_nearest_neighbors.mqh/0 | {
"file_path": "MALE5/Neighbors/KNN_nearest_neighbors.mqh",
"repo_id": "MALE5",
"token_count": 5884
} | 36 |
//+------------------------------------------------------------------+
//| preprocessing.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| LABEL ENCODE CLASS FOR PREPROCESSING INFORMATION |
//+------------------------------------------------------------------+
struct CLabelEncoder
{
private:
int dummy;
void Unique(const string &Array[], string &classes_arr[]) //From matrix<T> utils
{
string temp_arr[];
ArrayResize(classes_arr,1);
ArrayCopy(temp_arr,Array);
classes_arr[0] = Array[0];
for(int i=0, count =1; i<ArraySize(Array); i++) //counting the different neighbors
{
for(int j=0; j<ArraySize(Array); j++)
{
if(Array[i] == temp_arr[j] && temp_arr[j] != "-nan")
{
bool count_ready = false;
for(int n=0; n<ArraySize(classes_arr); n++)
if(Array[i] == classes_arr[n])
count_ready = true;
if(!count_ready)
{
count++;
ArrayResize(classes_arr,count);
classes_arr[count-1] = Array[i];
temp_arr[j] = "-nan"; //modify so that it can no more be counted
}
else
break;
//Print("t vectors vector<T> ",v);
}
else
continue;
}
}
}
//--- Sort the array based on the bubble algorithm
bool BubbleSortStrings(string &arr[])
{
int arraySize = ArraySize(arr);
if (arraySize == 0)
{
Print(__FUNCTION__," Failed to Sort | ArraySize = 0");
return false;
}
for(int i = 0; i < arraySize - 1; i++)
{
for(int j = 0; j < arraySize - i - 1; j++)
{
if(StringCompare(arr[j], arr[j + 1], false) > 0)
{
// Swap arr[j] and arr[j + 1]
string temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
}
}
}
return true;
}
public:
vector encode(string &Arr[])
{
string unique_values[];
Unique(Arr, unique_values);
vector ret(ArraySize(Arr));
if (!BubbleSortStrings(unique_values))
return ret;
for (int i=0; i<ArraySize(unique_values); i++)
for (int j=0; j<ArraySize(Arr); j++)
if (unique_values[i] == Arr[j])
ret[j] = i+1;
return ret;
}
};
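//+------------------------------------------------------------------+
//| Hedged usage sketch (not part of the original library): encode()  |
//| sorts the unique strings alphabetically and maps them to 1..n, so |
//| in this assumed input "female" becomes 1 and "male" becomes 2.    |
//+------------------------------------------------------------------+
void LabelEncoder_UsageSketch(void)
 {
   string genders[] = {"male","female","female","male"};
   CLabelEncoder encoder;
   vector encoded = encoder.encode(genders);
   Print("encoded labels = ",encoded); //expected: [2,1,1,2]
 }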
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include "MatrixExtend.mqh";
//+------------------------------------------------------------------+
//| |
//| |
//| Standardization Scaler |
//| |
//| |
//+------------------------------------------------------------------+
class StandardizationScaler
{
protected:
vector mean, std;
bool loaded_scaler;
public:
StandardizationScaler(void);
StandardizationScaler(const double &mean[], const double &std[]); //For Loading the pre-fitted scaler
~StandardizationScaler(void);
virtual matrix fit_transform(const matrix &X);
virtual matrix transform(const matrix &X);
virtual vector transform(const vector &X);
virtual bool save(string save_dir);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
StandardizationScaler::StandardizationScaler(void)
{
loaded_scaler = false;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
StandardizationScaler::StandardizationScaler(const double &mean_[],const double &std_[])
{
this.mean = MatrixExtend::ArrayToVector(mean_);
this.std = MatrixExtend::ArrayToVector(std_);
loaded_scaler = true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
StandardizationScaler::~StandardizationScaler(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix StandardizationScaler::fit_transform(const matrix &X)
{
if (loaded_scaler)
{
printf("% This is a loaded scaler | no need to fit to the new data, call another instance of a class",__FUNCTION__);
return X;
}
this.mean.Resize(X.Cols());
this.std.Resize(X.Cols());
for (ulong i=0; i<X.Cols(); i++)
{
this.mean[i] = X.Col(i).Mean();
this.std[i] = X.Col(i).Std();
}
//---
return this.transform(X);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix StandardizationScaler::transform(const matrix &X)
{
matrix X_norm = X;
for (ulong i=0; i<X.Rows(); i++)
X_norm.Row(this.transform(X.Row(i)), i);
return X_norm;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector StandardizationScaler::transform(const vector &X)
{
vector v(X.Size());
if (this.mean.Size()==0 || this.std.Size()==0)
{
printf("%s Call the fit_transform function first to fit the scaler or\n Load the pre-fitted scaler before attempting to transform the new data",__FUNCTION__);
return v;
}
if (X.Size() != this.mean.Size())
{
printf("%s Dimension mismatch between trained data sized=(%d) and the new data sized=(%d)",__FUNCTION__,this.mean.Size(),X.Size());
return v;
}
for (ulong i=0; i<v.Size(); i++)
v[i] = (X[i] - this.mean[i]) / (this.std[i] + 1e-10);
return v;
}
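//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): with a column   |
//| mean of 10 and a standard deviation of 2, an input of 14 is       |
//| transformed to (14 - 10) / 2 = 2; the 1e-10 term only guards      |
//| against division by a zero standard deviation.                    |
//+------------------------------------------------------------------+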
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool StandardizationScaler::save(string save_dir)
{
//---save mean
if (!MatrixExtend::write_bin(this.mean, save_dir+"\\mean.bin"))
{
printf("%s Failed Save the mean values of the Scaler",__FUNCTION__);
return false;
}
//--- save std
if (!MatrixExtend::write_bin(this.std, save_dir+"\\std.bin"))
{
printf("%s Failed Save the Standard deviation values of the Scaler",__FUNCTION__);
return false;
}
return true;
}
//+------------------------------------------------------------------+
//| |
//| |
//| Min-Max Scaler |
//| |
//| |
//+------------------------------------------------------------------+
class MinMaxScaler
{
protected:
vector min, max;
bool loaded_scaler;
public:
MinMaxScaler(void);
MinMaxScaler(const double &min_[], const double &max_[]); //For Loading the pre-fitted scaler
~MinMaxScaler(void);
virtual matrix fit_transform(const matrix &X);
virtual matrix transform(const matrix &X);
virtual vector transform(const vector &X);
virtual bool save(string dir);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
MinMaxScaler::MinMaxScaler(void)
{
loaded_scaler =false;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
MinMaxScaler::~MinMaxScaler(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
MinMaxScaler::MinMaxScaler(const double &min_[],const double &max_[])
{
this.min = MatrixExtend::ArrayToVector(min_);
this.max = MatrixExtend::ArrayToVector(max_);
loaded_scaler = true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MinMaxScaler::fit_transform(const matrix &X)
{
if (loaded_scaler)
{
printf("% This is a loaded scaler | no need to fit to the new data, call another instance of a class",__FUNCTION__);
return X;
}
//---
this.min.Resize(X.Cols());
this.max.Resize(X.Cols());
for (ulong i=0; i<X.Cols(); i++)
{
this.min[i] = X.Col(i).Min();
this.max[i] = X.Col(i).Max();
}
if (MQLInfoInteger(MQL_DEBUG))
Print("Min: ",this.min,"\nMax: ",this.max);
//---
return this.transform(X);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector MinMaxScaler::transform(const vector &X)
{
vector v(X.Size());
if (this.min.Size()==0 || this.max.Size()==0)
{
printf("%s Call the fit_transform function fist to fit the scaler or\n the load function to load the pre-fitted scalerbefore attempting to transform the new data",__FUNCTION__);
return v;
}
if (X.Size() != this.min.Size())
{
printf("%s X of size [%d] doesn't match the same number of features in a given X matrix on the fit_transform function call",__FUNCTION__,this.min.Size());
return v;
}
for (ulong i=0; i<X.Size(); i++)
v[i] = (X[i] - this.min[i]) / ((this.max[i] - this.min[i]) + 1e-10);
return v;
}
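//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): with a column   |
//| min of 5 and max of 15, an input of 10 is scaled to               |
//| (10 - 5) / (15 - 5) = 0.5.                                        |
//+------------------------------------------------------------------+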
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MinMaxScaler::transform(const matrix &X)
{
matrix X_norm = X;
for (ulong i=0; i<X.Rows(); i++)
X_norm.Row(this.transform(X.Row(i)), i);
return X_norm;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool MinMaxScaler::save(string save_dir)
{
//---save min
if (!MatrixExtend::write_bin(this.min, save_dir+"\\min.bin"))
{
printf("%s Failed to save the Min values for the scaler",__FUNCTION__);
return false;
}
//--- save max
if (!MatrixExtend::write_bin(this.max, save_dir+"\\max.bin"))
{
printf("%s Failed to save the Max values for the scaler",__FUNCTION__);
return false;
}
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| |
//| |
//|                          Robust Scaler                           |
//| |
//| |
//+------------------------------------------------------------------+
class RobustScaler
{
protected:
vector median, quantile;
bool loaded_scaler;
public:
RobustScaler(void);
RobustScaler(const double &median_[], const double &quantile_[]);
~RobustScaler(void);
virtual matrix fit_transform(const matrix &X);
virtual matrix transform(const matrix &X);
virtual vector transform(const vector &X);
virtual bool save(string save_dir);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
RobustScaler::RobustScaler(void)
{
loaded_scaler = false;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
RobustScaler::RobustScaler(const double &median_[],const double &quantile_[])
{
this.median = MatrixExtend::ArrayToVector(median_);
this.quantile = MatrixExtend::ArrayToVector(quantile_);
loaded_scaler = true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
RobustScaler::~RobustScaler(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix RobustScaler::fit_transform(const matrix &X)
{
if (loaded_scaler)
{
printf("% This is a loaded scaler | no need to fit to the new data, call another instance of a class",__FUNCTION__);
return X;
}
//---
this.median.Resize(X.Cols());
this.quantile.Resize(X.Cols());
for (ulong i=0; i<X.Cols(); i++)
{
this.median[i] = X.Col(i).Median();
this.quantile[i] = MathAbs(X.Col(i) - this.median[i]).Median() * 1.4826; //scaled MAD; 1.4826 makes the MAD a consistent estimator of the standard deviation
}
if (MQLInfoInteger(MQL_DEBUG))
Print("Median: ",this.median,"\nQuantile: ",this.quantile);
//---
return this.transform(X);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix RobustScaler::transform(const matrix &X)
{
matrix X_norm = X;
for (ulong i=0; i<X.Rows(); i++)
X_norm.Row(this.transform(X.Row(i)), i);
return X_norm;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector RobustScaler::transform(const vector &X)
{
vector v(X.Size());
if (this.median.Size()==0)
{
printf("%s Call the fit_transform function fist to fit the scaler or\n the load function to load the pre-fitted scalerbefore attempting to transform the new data",__FUNCTION__);
return v;
}
if (X.Size() != this.median.Size())
{
printf("%s X of size [%d] doesn't match the same number of features in a given X matrix on the fit_transform function call",__FUNCTION__,this.median.Size());
return v;
}
for (ulong i=0; i<X.Size(); i++)
v[i] = (X[i] - this.median[i]) / (quantile[i] + 1e-10);
return v;
}
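//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): with a column   |
//| median of 10 and a scaled MAD (quantile) of 3, an input of 16 is  |
//| transformed to (16 - 10) / 3 = 2.                                 |
//+------------------------------------------------------------------+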
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool RobustScaler::save(string save_dir)
{
//--- save median
if (!MatrixExtend::write_bin(this.median, save_dir+"\\median.bin"))
{
printf("%s Failed to save the Median values for the scaler",__FUNCTION__);
return false;
}
//--- save quantile
if (!MatrixExtend::write_bin(this.quantile, save_dir+"\\quantile.bin"))
{
printf("%s Failed to save the Quantile values for the scaler",__FUNCTION__);
return false;
}
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/preprocessing.mqh/0 | {
"file_path": "MALE5/preprocessing.mqh",
"repo_id": "MALE5",
"token_count": 8928
} | 37 |
<jupyter_start><jupyter_code>import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
datapath = r"NASDAQ_DATA.csv"
data = pd.read_csv(datapath)
print(data.shape)
data.head()
import seaborn as sns
datasets = data.iloc[:, 0:]
sns.pairplot(datasets,diag_kind="kde")<jupyter_output><empty_output><jupyter_text>The data has to be converted to a numpy array before it can be used to compute the correlation coefficient<jupyter_code>y = np.array(data["NASDAQ"])
x_list = ["S&P500","13RSI","50SMA"]
print("Checking the correlation")
for i in x_list:
x = np.array(data[i])
print("\n",i, np.corrcoef(x,y))<jupyter_output>Checking the correlation
S&P500 [[1. 0.98561213]
[0.98561213 1. ]]
13RSI [[1. 0.42778016]
[0.42778016 1. ]]
50SMA [[1. 0.88531674]
[0.88531674 1. ]]<jupyter_text>Data has to be reshaped into one column but unknown rows (-1,1)<jupyter_code>x = data.drop("NASDAQ",axis=1)
y = data["NASDAQ"].values.reshape(-1,1)
reg = LinearRegression()
reg = reg.fit(x,y)
print("coefficients of our model")
print(reg.coef_)
print(reg.intercept_)<jupyter_output>coefficients of our model
[[2.75526744 0.37952369 8.06681286]]
[-3670.9716706] | MatrixRegressionMQL5/LinearRegression.ipynb/0 | {
"file_path": "MatrixRegressionMQL5/LinearRegression.ipynb",
"repo_id": "MatrixRegressionMQL5",
"token_count": 579
} | 38 |
//+------------------------------------------------------------------+
//| NNTestScript.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
#property strict
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
#include "NeuralNets.mqh";
CNeuralNets *neuralnet;
//+------------------------------------------------------------------+
void OnStart()
{
int hlnodes[3] = {4,6,1};
int outputs = 1;
int inputs_=2;
double XMatrix[]; int rows,cols;
CSVToMatrix(XMatrix,rows,cols,"NASDAQ_DATA.csv");
//MatrixPrint(XMatrix,cols,3);
/*
neuralnet = new CNeuralNets(SIGMOID,RELU,cols,hlnodes,outputs);
neuralnet.train_feedforwardMLP(XMatrix);
delete(neuralnet);
*/
double weights[], bias[];
int handlew = FileOpen("NNTestScript\\model_w.bin",FILE_READ|FILE_BIN);
FileReadArray(handlew,weights);
FileClose(handlew);
int handleb = FileOpen("NNTestScript\\model_b.bin",FILE_READ|FILE_BIN);
FileReadArray(handleb,bias);
FileClose(handleb);
double Inputs[]; ArrayCopy(Inputs,XMatrix,0,0,cols); //copy the first row (the first cols values) from this matrix
double Output[];
neuralnet = new CNeuralNets(RELU,RELU,cols,hlnodes,outputs);
neuralnet.FeedForwardMLP(Inputs,Output,weights,bias);
Print("Outputs");
ArrayPrint(Output);
delete(neuralnet);
}
//+------------------------------------------------------------------+
| NeuralNetworks-MQL5/NNTestScript.mq5/0 | {
"file_path": "NeuralNetworks-MQL5/NNTestScript.mq5",
"repo_id": "NeuralNetworks-MQL5",
"token_count": 865
} | 39 |
//+------------------------------------------------------------------+
//| ONNX get data.mq5 |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
#property description "Script for collecting data for a simple ONNX tutorial project"
#property version "1.00"
#property script_show_inputs
#include <MALE5\preprocessing.mqh>
CPreprocessing<vectorf, matrixf> *norm_x;
input uint start_bar = 100;
input uint total_bars = 10000;
input norm_technique NORM = NORM_STANDARDIZATION;
vectorf OPEN,
HIGH,
LOW,
CLOSE;
string csv_header = "";
string x_vars = ""; //Independent variable names for saving them into a csv file
string csv_name_ = "";
string normparams_folder = "ONNX Normparams\\";
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
matrixf dataset = GetTrainData(start_bar, total_bars);
Print("Train data\n",dataset);
// matrixf dataset = GetTrainData(0, 1);
//
// Print("Live data\n",dataset);
while (CheckPointer(norm_x) != POINTER_INVALID)
delete (norm_x);
}
//+------------------------------------------------------------------+
//| This function obtains "total" bars of historical price data      |
//| starting at the bar located at the "start" index                 |
//+------------------------------------------------------------------+
matrixf GetTrainData(uint start, uint total)
{
matrixf return_matrix(total, 3);
ulong last_col;
OPEN.CopyRates(Symbol(), PERIOD_CURRENT, COPY_RATES_OPEN, start, total);
HIGH.CopyRates(Symbol(), PERIOD_CURRENT, COPY_RATES_HIGH, start, total);
LOW.CopyRates(Symbol(), PERIOD_CURRENT, COPY_RATES_LOW, start, total);
CLOSE.CopyRates(Symbol(), PERIOD_CURRENT, COPY_RATES_CLOSE, start, total);
return_matrix.Col(OPEN, 0);
return_matrix.Col(HIGH, 1);
return_matrix.Col(LOW, 2);
matrixf norm_params = {};
csv_name_ = Symbol()+"."+EnumToString(Period())+"."+string(total_bars);
x_vars = "OPEN,HIGH,LOW";
while (CheckPointer(norm_x) != POINTER_INVALID)
delete (norm_x);
norm_x = new CPreprocessing<vectorf, matrixf>(return_matrix, NORM);
//--- Saving the normalization parameters
switch(NORM)
{
case NORM_MEAN_NORM:
//--- saving the mean
norm_params.Assign(norm_x.mean_norm_scaler.mean);
WriteCsv(normparams_folder+csv_name_+".mean_norm_scaler.mean.csv",norm_params,x_vars);
//--- saving the min
norm_params.Assign(norm_x.mean_norm_scaler.min);
WriteCsv(normparams_folder+csv_name_+".mean_norm_scaler.min.csv",norm_params,x_vars);
//--- saving the max
norm_params.Assign(norm_x.mean_norm_scaler.max);
WriteCsv(normparams_folder+csv_name_+".mean_norm_scaler.max.csv",norm_params,x_vars);
break;
case NORM_MIN_MAX_SCALER:
//--- saving the min
norm_params.Assign(norm_x.min_max_scaler.min);
WriteCsv(normparams_folder+csv_name_+".min_max_scaler.min.csv",norm_params,x_vars);
//--- saving the max
norm_params.Assign(norm_x.min_max_scaler.max);
WriteCsv(normparams_folder+csv_name_+".min_max_scaler.max.csv",norm_params,x_vars);
break;
case NORM_STANDARDIZATION:
//--- saving the mean
norm_params.Assign(norm_x.standardization_scaler.mean);
WriteCsv(normparams_folder+csv_name_+".standardization_scaler.mean.csv",norm_params,x_vars);
//--- saving the std
norm_params.Assign(norm_x.standardization_scaler.std);
WriteCsv(normparams_folder+csv_name_+".standardization_scaler.std.csv",norm_params,x_vars);
break;
}
return_matrix.Resize(total, 4); //if we are collecting the train data collect the target variable also
last_col = return_matrix.Cols()-1; //Column located at the last index is the last column
return_matrix.Col(CLOSE, last_col); //put the close price information in the last column of a matrix
csv_name_ +=".targ=CLOSE";
csv_header = x_vars + ",CLOSE";
if (!WriteCsv("ONNX Datafolder\\"+csv_name_+".csv", return_matrix, csv_header))
Print("Failed to Write to a csv file");
else
Print("Data saved to a csv file successfully");
return return_matrix;
}
//+------------------------------------------------------------------+
//| Writting a matrix to a csv file |
//+------------------------------------------------------------------+
bool WriteCsv(string csv_name, const matrixf &matrix_, string header_string)
{
FileDelete(csv_name);
int handle = FileOpen(csv_name,FILE_WRITE|FILE_CSV|FILE_ANSI,",",CP_UTF8);
ResetLastError();
if(handle == INVALID_HANDLE)
{
printf("Invalid %s handle Error %d ",csv_name,GetLastError());
return (false);
}
string concstring;
vectorf row = {};
datetime time_start = GetTickCount(), current_time;
string header[];
ushort u_sep;
u_sep = StringGetCharacter(",",0);
StringSplit(header_string,u_sep, header);
vectorf colsinrows = matrix_.Row(0);
if (ArraySize(header) != (int)colsinrows.Size())
{
printf("headers=%d and columns=%d from the matrix vary is size ",ArraySize(header),colsinrows.Size());
return false;
}
//---
string header_str = "";
for (int i=0; i<ArraySize(header); i++)
header_str += header[i] + (i+1 == colsinrows.Size() ? "" : ",");
FileWrite(handle,header_str);
FileSeek(handle,0,SEEK_SET);
for(ulong i=0; i<matrix_.Rows() && !IsStopped(); i++)
{
ZeroMemory(concstring);
row = matrix_.Row(i);
for(ulong j=0, cols =1; j<row.Size() && !IsStopped(); j++, cols++)
{
current_time = GetTickCount();
Comment("Writting ",csv_name," record [",i+1,"/",matrix_.Rows(),"] Time taken | ",ConvertTime((current_time - time_start) / 1000.0));
concstring += (string)row[j] + (cols == matrix_.Cols() ? "" : ",");
}
FileSeek(handle,0,SEEK_END);
FileWrite(handle,concstring);
}
FileClose(handle);
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
string ConvertTime(double seconds)
{
string time_str = "";
uint minutes = 0, hours = 0;
if (seconds >= 60)
{
minutes = (uint)(seconds / 60.0) ;
seconds = fmod(seconds, 60.0); //remaining seconds after the whole minutes are removed
time_str = StringFormat("%d Minutes and %.3f Seconds", minutes, seconds);
}
if (minutes >= 60)
{
hours = (uint)(minutes / 60.0);
minutes = minutes % 60;
time_str = StringFormat("%d Hours and %d Minutes", hours, minutes);
}
if (time_str == "")
{
time_str = StringFormat("%.3f Seconds", seconds);
}
return time_str;
}
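//+------------------------------------------------------------------+
//| Worked example (values assumed for illustration): for an input of |
//| 125.5 seconds, minutes = 2 and the remaining seconds are          |
//| fmod(125.5, 60) = 5.5, so the string returned is                  |
//| "2 Minutes and 5.500 Seconds".                                    |
//+------------------------------------------------------------------+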
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| ONNX-MQL5/ONNX get data.mq5/0 | {
"file_path": "ONNX-MQL5/ONNX get data.mq5",
"repo_id": "ONNX-MQL5",
"token_count": 3484
} | 40 |
{% extends "base_template.html" %}
<h2>Author Delete !</h2>
<h1>Are you sure you want to delete this Author: {{author}}?</h1>
{% block content %}
<form action="" method="post">
{% csrf_token %}
<input type="submit" value="Yes, Delete.">
</form>
{% endblock %} | Django-locallibrary/LocalLibrary/catalog/Templates/Author_delete.html/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/Author_delete.html",
"repo_id": "Django-locallibrary",
"token_count": 120
} | 0 |
<svg width="14" height="84" viewBox="0 0 1792 10752" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<g id="sort">
<path d="M1408 1088q0 26-19 45l-448 448q-19 19-45 19t-45-19l-448-448q-19-19-19-45t19-45 45-19h896q26 0 45 19t19 45zm0-384q0 26-19 45t-45 19h-896q-26 0-45-19t-19-45 19-45l448-448q19-19 45-19t45 19l448 448q19 19 19 45z"/>
</g>
<g id="ascending">
<path d="M1408 1216q0 26-19 45t-45 19h-896q-26 0-45-19t-19-45 19-45l448-448q19-19 45-19t45 19l448 448q19 19 19 45z"/>
</g>
<g id="descending">
<path d="M1408 704q0 26-19 45l-448 448q-19 19-45 19t-45-19l-448-448q-19-19-19-45t19-45 45-19h896q26 0 45 19t19 45z"/>
</g>
</defs>
<use xlink:href="#sort" x="0" y="0" fill="#999999" />
<use xlink:href="#sort" x="0" y="1792" fill="#447e9b" />
<use xlink:href="#ascending" x="0" y="3584" fill="#999999" />
<use xlink:href="#ascending" x="0" y="5376" fill="#447e9b" />
<use xlink:href="#descending" x="0" y="7168" fill="#999999" />
<use xlink:href="#descending" x="0" y="8960" fill="#447e9b" />
</svg>
| Django-locallibrary/LocalLibrary/staticfiles/admin/img/sorting-icons.3a097b59f104.svg/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/img/sorting-icons.3a097b59f104.svg",
"repo_id": "Django-locallibrary",
"token_count": 558
} | 1 |
/*global SelectBox, gettext, interpolate, quickElement, SelectFilter*/
/*
SelectFilter2 - Turns a multiple-select box into a filter interface.
Requires core.js and SelectBox.js.
*/
'use strict';
{
window.SelectFilter = {
init: function(field_id, field_name, is_stacked) {
if (field_id.match(/__prefix__/)) {
// Don't initialize on empty forms.
return;
}
const from_box = document.getElementById(field_id);
from_box.id += '_from'; // change its ID
from_box.className = 'filtered';
for (const p of from_box.parentNode.getElementsByTagName('p')) {
if (p.classList.contains("info")) {
// Remove <p class="info">, because it just gets in the way.
from_box.parentNode.removeChild(p);
} else if (p.classList.contains("help")) {
// Move help text up to the top so it isn't below the select
// boxes or wrapped off on the side to the right of the add
// button:
from_box.parentNode.insertBefore(p, from_box.parentNode.firstChild);
}
}
// <div class="selector"> or <div class="selector stacked">
const selector_div = quickElement('div', from_box.parentNode);
selector_div.className = is_stacked ? 'selector stacked' : 'selector';
// <div class="selector-available">
const selector_available = quickElement('div', selector_div);
selector_available.className = 'selector-available';
const title_available = quickElement('h2', selector_available, interpolate(gettext('Available %s') + ' ', [field_name]));
quickElement(
'span', title_available, '',
'class', 'help help-tooltip help-icon',
'title', interpolate(
gettext(
'This is the list of available %s. You may choose some by ' +
'selecting them in the box below and then clicking the ' +
'"Choose" arrow between the two boxes.'
),
[field_name]
)
);
const filter_p = quickElement('p', selector_available, '', 'id', field_id + '_filter');
filter_p.className = 'selector-filter';
const search_filter_label = quickElement('label', filter_p, '', 'for', field_id + '_input');
quickElement(
'span', search_filter_label, '',
'class', 'help-tooltip search-label-icon',
'title', interpolate(gettext("Type into this box to filter down the list of available %s."), [field_name])
);
filter_p.appendChild(document.createTextNode(' '));
const filter_input = quickElement('input', filter_p, '', 'type', 'text', 'placeholder', gettext("Filter"));
filter_input.id = field_id + '_input';
selector_available.appendChild(from_box);
const choose_all = quickElement('a', selector_available, gettext('Choose all'), 'title', interpolate(gettext('Click to choose all %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_add_all_link');
choose_all.className = 'selector-chooseall';
// <ul class="selector-chooser">
const selector_chooser = quickElement('ul', selector_div);
selector_chooser.className = 'selector-chooser';
const add_link = quickElement('a', quickElement('li', selector_chooser), gettext('Choose'), 'title', gettext('Choose'), 'href', '#', 'id', field_id + '_add_link');
add_link.className = 'selector-add';
const remove_link = quickElement('a', quickElement('li', selector_chooser), gettext('Remove'), 'title', gettext('Remove'), 'href', '#', 'id', field_id + '_remove_link');
remove_link.className = 'selector-remove';
// <div class="selector-chosen">
const selector_chosen = quickElement('div', selector_div);
selector_chosen.className = 'selector-chosen';
const title_chosen = quickElement('h2', selector_chosen, interpolate(gettext('Chosen %s') + ' ', [field_name]));
quickElement(
'span', title_chosen, '',
'class', 'help help-tooltip help-icon',
'title', interpolate(
gettext(
'This is the list of chosen %s. You may remove some by ' +
'selecting them in the box below and then clicking the ' +
'"Remove" arrow between the two boxes.'
),
[field_name]
)
);
const to_box = quickElement('select', selector_chosen, '', 'id', field_id + '_to', 'multiple', '', 'size', from_box.size, 'name', from_box.name);
to_box.className = 'filtered';
const clear_all = quickElement('a', selector_chosen, gettext('Remove all'), 'title', interpolate(gettext('Click to remove all chosen %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_remove_all_link');
clear_all.className = 'selector-clearall';
from_box.name = from_box.name + '_old';
// Set up the JavaScript event handlers for the select box filter interface
const move_selection = function(e, elem, move_func, from, to) {
if (elem.classList.contains('active')) {
move_func(from, to);
SelectFilter.refresh_icons(field_id);
}
e.preventDefault();
};
choose_all.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move_all, field_id + '_from', field_id + '_to');
});
add_link.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move, field_id + '_from', field_id + '_to');
});
remove_link.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move, field_id + '_to', field_id + '_from');
});
clear_all.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move_all, field_id + '_to', field_id + '_from');
});
filter_input.addEventListener('keypress', function(e) {
SelectFilter.filter_key_press(e, field_id);
});
filter_input.addEventListener('keyup', function(e) {
SelectFilter.filter_key_up(e, field_id);
});
filter_input.addEventListener('keydown', function(e) {
SelectFilter.filter_key_down(e, field_id);
});
selector_div.addEventListener('change', function(e) {
if (e.target.tagName === 'SELECT') {
SelectFilter.refresh_icons(field_id);
}
});
selector_div.addEventListener('dblclick', function(e) {
if (e.target.tagName === 'OPTION') {
if (e.target.closest('select').id === field_id + '_to') {
SelectBox.move(field_id + '_to', field_id + '_from');
} else {
SelectBox.move(field_id + '_from', field_id + '_to');
}
SelectFilter.refresh_icons(field_id);
}
});
from_box.closest('form').addEventListener('submit', function() {
SelectBox.select_all(field_id + '_to');
});
SelectBox.init(field_id + '_from');
SelectBox.init(field_id + '_to');
// Move selected from_box options to to_box
SelectBox.move(field_id + '_from', field_id + '_to');
if (!is_stacked) {
// In horizontal mode, give the same height to the two boxes.
const j_from_box = document.getElementById(field_id + '_from');
const j_to_box = document.getElementById(field_id + '_to');
let height = filter_p.offsetHeight + j_from_box.offsetHeight;
const j_to_box_style = window.getComputedStyle(j_to_box);
if (j_to_box_style.getPropertyValue('box-sizing') === 'border-box') {
// Add the padding and border to the final height.
height += parseInt(j_to_box_style.getPropertyValue('padding-top'), 10)
+ parseInt(j_to_box_style.getPropertyValue('padding-bottom'), 10)
+ parseInt(j_to_box_style.getPropertyValue('border-top-width'), 10)
+ parseInt(j_to_box_style.getPropertyValue('border-bottom-width'), 10);
}
j_to_box.style.height = height + 'px';
}
// Initial icon refresh
SelectFilter.refresh_icons(field_id);
},
any_selected: function(field) {
// Temporarily add the required attribute and check validity.
field.required = true;
const any_selected = field.checkValidity();
field.required = false;
return any_selected;
},
refresh_icons: function(field_id) {
const from = document.getElementById(field_id + '_from');
const to = document.getElementById(field_id + '_to');
// Active if at least one item is selected
document.getElementById(field_id + '_add_link').classList.toggle('active', SelectFilter.any_selected(from));
document.getElementById(field_id + '_remove_link').classList.toggle('active', SelectFilter.any_selected(to));
// Active if the corresponding box isn't empty
document.getElementById(field_id + '_add_all_link').classList.toggle('active', from.querySelector('option'));
document.getElementById(field_id + '_remove_all_link').classList.toggle('active', to.querySelector('option'));
},
filter_key_press: function(event, field_id) {
const from = document.getElementById(field_id + '_from');
// don't submit form if user pressed Enter
if ((event.which && event.which === 13) || (event.keyCode && event.keyCode === 13)) {
from.selectedIndex = 0;
SelectBox.move(field_id + '_from', field_id + '_to');
from.selectedIndex = 0;
event.preventDefault();
}
},
filter_key_up: function(event, field_id) {
const from = document.getElementById(field_id + '_from');
const temp = from.selectedIndex;
SelectBox.filter(field_id + '_from', document.getElementById(field_id + '_input').value);
from.selectedIndex = temp;
},
filter_key_down: function(event, field_id) {
const from = document.getElementById(field_id + '_from');
// right arrow -- move across
if ((event.which && event.which === 39) || (event.keyCode && event.keyCode === 39)) {
const old_index = from.selectedIndex;
SelectBox.move(field_id + '_from', field_id + '_to');
from.selectedIndex = (old_index === from.length) ? from.length - 1 : old_index;
return;
}
// down arrow -- wrap around
if ((event.which && event.which === 40) || (event.keyCode && event.keyCode === 40)) {
from.selectedIndex = (from.length === from.selectedIndex + 1) ? 0 : from.selectedIndex + 1;
}
// up arrow -- wrap around
if ((event.which && event.which === 38) || (event.keyCode && event.keyCode === 38)) {
from.selectedIndex = (from.selectedIndex === 0) ? from.length - 1 : from.selectedIndex - 1;
}
}
};
window.addEventListener('load', function(e) {
document.querySelectorAll('select.selectfilter, select.selectfilterstacked').forEach(function(el) {
const data = el.dataset;
SelectFilter.init(el.id, data.fieldName, parseInt(data.isStacked, 10));
});
});
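// Usage sketch (illustrative only; the id, field name, and option shown below
// are assumptions, not taken from this file). The load handler above picks up
// multi-selects rendered with these classes and data attributes:
//
//   <select multiple id="id_books" class="selectfilter"
//           data-field-name="books" data-is-stacked="0">
//       <option value="1">Some book</option>
//   </select>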
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/SelectFilter2.d250dcb52a9a.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/SelectFilter2.d250dcb52a9a.js",
"repo_id": "Django-locallibrary",
"token_count": 5803
} | 2 |
'use strict';
{
const $ = django.jQuery;
const init = function($element, options) {
const settings = $.extend({
ajax: {
data: function(params) {
return {
term: params.term,
page: params.page,
app_label: $element.data('app-label'),
model_name: $element.data('model-name'),
field_name: $element.data('field-name')
};
}
}
}, options);
$element.select2(settings);
};
$.fn.djangoAdminSelect2 = function(options) {
const settings = $.extend({}, options);
$.each(this, function(i, element) {
const $element = $(element);
init($element, settings);
});
return this;
};
$(function() {
// Initialize all autocomplete widgets except the one in the template
// form used when a new formset is added.
$('.admin-autocomplete').not('[name*=__prefix__]').djangoAdminSelect2();
});
$(document).on('formset:added', (function() {
return function(event, $newFormset) {
return $newFormset.find('.admin-autocomplete').djangoAdminSelect2();
};
})(this));
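    // Usage sketch (illustrative; the app, model, and field values are
    // assumptions, not taken from this file). The ready handler above calls
    // $('.admin-autocomplete').djangoAdminSelect2() on markup such as:
    //
    //   <select class="admin-autocomplete" data-app-label="catalog"
    //           data-model-name="book" data-field-name="author"></select>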
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/autocomplete.b6b77d0e5906.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/autocomplete.b6b77d0e5906.js",
"repo_id": "Django-locallibrary",
"token_count": 663
} | 3 |
/*global gettext*/
'use strict';
{
window.addEventListener('load', function() {
// Add anchor tag for Show/Hide link
const fieldsets = document.querySelectorAll('fieldset.collapse');
for (const [i, elem] of fieldsets.entries()) {
// Don't hide if fields in this fieldset have errors
if (elem.querySelectorAll('div.errors, ul.errorlist').length === 0) {
elem.classList.add('collapsed');
const h2 = elem.querySelector('h2');
const link = document.createElement('a');
link.id = 'fieldsetcollapser' + i;
link.className = 'collapse-toggle';
link.href = '#';
link.textContent = gettext('Show');
h2.appendChild(document.createTextNode(' ('));
h2.appendChild(link);
h2.appendChild(document.createTextNode(')'));
}
}
// Add toggle to hide/show anchor tag
const toggleFunc = function(ev) {
if (ev.target.matches('.collapse-toggle')) {
ev.preventDefault();
ev.stopPropagation();
const fieldset = ev.target.closest('fieldset');
if (fieldset.classList.contains('collapsed')) {
// Show
ev.target.textContent = gettext('Hide');
fieldset.classList.remove('collapsed');
} else {
// Hide
ev.target.textContent = gettext('Show');
fieldset.classList.add('collapsed');
}
}
};
document.querySelectorAll('fieldset.module').forEach(function(el) {
el.addEventListener('click', toggleFunc);
});
});
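    // Markup this script expects, sketched for illustration (the legend text
    // is a made-up example): the fieldset needs the "collapse" class and an
    // <h2> for the Show/Hide link to be appended to.
    //
    //   <fieldset class="module collapse">
    //     <h2>Advanced options</h2>
    //     ...
    //   </fieldset>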
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/collapse.f84e7410290f.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/collapse.f84e7410290f.js",
"repo_id": "Django-locallibrary",
"token_count": 920
} | 4 |
'use strict';
{
const toggleNavSidebar = document.getElementById('toggle-nav-sidebar');
if (toggleNavSidebar !== null) {
const navLinks = document.querySelectorAll('#nav-sidebar a');
function disableNavLinkTabbing() {
for (const navLink of navLinks) {
navLink.tabIndex = -1;
}
}
function enableNavLinkTabbing() {
for (const navLink of navLinks) {
navLink.tabIndex = 0;
}
}
const main = document.getElementById('main');
let navSidebarIsOpen = localStorage.getItem('django.admin.navSidebarIsOpen');
if (navSidebarIsOpen === null) {
navSidebarIsOpen = 'true';
}
if (navSidebarIsOpen === 'false') {
disableNavLinkTabbing();
}
main.classList.toggle('shifted', navSidebarIsOpen === 'true');
toggleNavSidebar.addEventListener('click', function() {
if (navSidebarIsOpen === 'true') {
navSidebarIsOpen = 'false';
disableNavLinkTabbing();
} else {
navSidebarIsOpen = 'true';
enableNavLinkTabbing();
}
localStorage.setItem('django.admin.navSidebarIsOpen', navSidebarIsOpen);
main.classList.toggle('shifted');
});
}
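    // The sidebar state persists in localStorage under the key used above.
    // A sketch of resetting it by hand (standard Web Storage API, run from
    // the browser console):
    //   localStorage.removeItem('django.admin.navSidebarIsOpen');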
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/nav_sidebar.7605597ddf52.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/nav_sidebar.7605597ddf52.js",
"repo_id": "Django-locallibrary",
"token_count": 655
} | 5 |
/*global XRegExp*/
'use strict';
{
const LATIN_MAP = {
'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE',
'Ç': 'C', 'È': 'E', 'É': 'E', 'Ê': 'E', 'Ë': 'E', 'Ì': 'I', 'Í': 'I',
'Î': 'I', 'Ï': 'I', 'Ð': 'D', 'Ñ': 'N', 'Ò': 'O', 'Ó': 'O', 'Ô': 'O',
'Õ': 'O', 'Ö': 'O', 'Ő': 'O', 'Ø': 'O', 'Ù': 'U', 'Ú': 'U', 'Û': 'U',
'Ü': 'U', 'Ű': 'U', 'Ý': 'Y', 'Þ': 'TH', 'Ÿ': 'Y', 'ß': 'ss', 'à': 'a',
'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae', 'ç': 'c',
'è': 'e', 'é': 'e', 'ê': 'e', 'ë': 'e', 'ì': 'i', 'í': 'i', 'î': 'i',
'ï': 'i', 'ð': 'd', 'ñ': 'n', 'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o',
'ö': 'o', 'ő': 'o', 'ø': 'o', 'ù': 'u', 'ú': 'u', 'û': 'u', 'ü': 'u',
'ű': 'u', 'ý': 'y', 'þ': 'th', 'ÿ': 'y'
};
const LATIN_SYMBOLS_MAP = {
'©': '(c)'
};
const GREEK_MAP = {
'α': 'a', 'β': 'b', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z', 'η': 'h',
'θ': '8', 'ι': 'i', 'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': '3',
'ο': 'o', 'π': 'p', 'ρ': 'r', 'σ': 's', 'τ': 't', 'υ': 'y', 'φ': 'f',
'χ': 'x', 'ψ': 'ps', 'ω': 'w', 'ά': 'a', 'έ': 'e', 'ί': 'i', 'ό': 'o',
'ύ': 'y', 'ή': 'h', 'ώ': 'w', 'ς': 's', 'ϊ': 'i', 'ΰ': 'y', 'ϋ': 'y',
'ΐ': 'i', 'Α': 'A', 'Β': 'B', 'Γ': 'G', 'Δ': 'D', 'Ε': 'E', 'Ζ': 'Z',
'Η': 'H', 'Θ': '8', 'Ι': 'I', 'Κ': 'K', 'Λ': 'L', 'Μ': 'M', 'Ν': 'N',
'Ξ': '3', 'Ο': 'O', 'Π': 'P', 'Ρ': 'R', 'Σ': 'S', 'Τ': 'T', 'Υ': 'Y',
'Φ': 'F', 'Χ': 'X', 'Ψ': 'PS', 'Ω': 'W', 'Ά': 'A', 'Έ': 'E', 'Ί': 'I',
'Ό': 'O', 'Ύ': 'Y', 'Ή': 'H', 'Ώ': 'W', 'Ϊ': 'I', 'Ϋ': 'Y'
};
const TURKISH_MAP = {
'ş': 's', 'Ş': 'S', 'ı': 'i', 'İ': 'I', 'ç': 'c', 'Ç': 'C', 'ü': 'u',
'Ü': 'U', 'ö': 'o', 'Ö': 'O', 'ğ': 'g', 'Ğ': 'G'
};
const ROMANIAN_MAP = {
'ă': 'a', 'î': 'i', 'ș': 's', 'ț': 't', 'â': 'a',
'Ă': 'A', 'Î': 'I', 'Ș': 'S', 'Ț': 'T', 'Â': 'A'
};
const RUSSIAN_MAP = {
'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'yo',
'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'j', 'к': 'k', 'л': 'l', 'м': 'm',
'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u',
'ф': 'f', 'х': 'h', 'ц': 'c', 'ч': 'ch', 'ш': 'sh', 'щ': 'sh', 'ъ': '',
'ы': 'y', 'ь': '', 'э': 'e', 'ю': 'yu', 'я': 'ya',
'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'Yo',
'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'J', 'К': 'K', 'Л': 'L', 'М': 'M',
'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U',
'Ф': 'F', 'Х': 'H', 'Ц': 'C', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Sh', 'Ъ': '',
'Ы': 'Y', 'Ь': '', 'Э': 'E', 'Ю': 'Yu', 'Я': 'Ya'
};
const UKRAINIAN_MAP = {
'Є': 'Ye', 'І': 'I', 'Ї': 'Yi', 'Ґ': 'G', 'є': 'ye', 'і': 'i',
'ї': 'yi', 'ґ': 'g'
};
const CZECH_MAP = {
'č': 'c', 'ď': 'd', 'ě': 'e', 'ň': 'n', 'ř': 'r', 'š': 's', 'ť': 't',
'ů': 'u', 'ž': 'z', 'Č': 'C', 'Ď': 'D', 'Ě': 'E', 'Ň': 'N', 'Ř': 'R',
'Š': 'S', 'Ť': 'T', 'Ů': 'U', 'Ž': 'Z'
};
const SLOVAK_MAP = {
'á': 'a', 'ä': 'a', 'č': 'c', 'ď': 'd', 'é': 'e', 'í': 'i', 'ľ': 'l',
'ĺ': 'l', 'ň': 'n', 'ó': 'o', 'ô': 'o', 'ŕ': 'r', 'š': 's', 'ť': 't',
'ú': 'u', 'ý': 'y', 'ž': 'z',
'Á': 'a', 'Ä': 'A', 'Č': 'C', 'Ď': 'D', 'É': 'E', 'Í': 'I', 'Ľ': 'L',
'Ĺ': 'L', 'Ň': 'N', 'Ó': 'O', 'Ô': 'O', 'Ŕ': 'R', 'Š': 'S', 'Ť': 'T',
'Ú': 'U', 'Ý': 'Y', 'Ž': 'Z'
};
const POLISH_MAP = {
'ą': 'a', 'ć': 'c', 'ę': 'e', 'ł': 'l', 'ń': 'n', 'ó': 'o', 'ś': 's',
'ź': 'z', 'ż': 'z',
'Ą': 'A', 'Ć': 'C', 'Ę': 'E', 'Ł': 'L', 'Ń': 'N', 'Ó': 'O', 'Ś': 'S',
'Ź': 'Z', 'Ż': 'Z'
};
const LATVIAN_MAP = {
'ā': 'a', 'č': 'c', 'ē': 'e', 'ģ': 'g', 'ī': 'i', 'ķ': 'k', 'ļ': 'l',
'ņ': 'n', 'š': 's', 'ū': 'u', 'ž': 'z',
'Ā': 'A', 'Č': 'C', 'Ē': 'E', 'Ģ': 'G', 'Ī': 'I', 'Ķ': 'K', 'Ļ': 'L',
'Ņ': 'N', 'Š': 'S', 'Ū': 'U', 'Ž': 'Z'
};
const ARABIC_MAP = {
'أ': 'a', 'ب': 'b', 'ت': 't', 'ث': 'th', 'ج': 'g', 'ح': 'h', 'خ': 'kh', 'د': 'd',
'ذ': 'th', 'ر': 'r', 'ز': 'z', 'س': 's', 'ش': 'sh', 'ص': 's', 'ض': 'd', 'ط': 't',
'ظ': 'th', 'ع': 'aa', 'غ': 'gh', 'ف': 'f', 'ق': 'k', 'ك': 'k', 'ل': 'l', 'م': 'm',
'ن': 'n', 'ه': 'h', 'و': 'o', 'ي': 'y'
};
const LITHUANIAN_MAP = {
'ą': 'a', 'č': 'c', 'ę': 'e', 'ė': 'e', 'į': 'i', 'š': 's', 'ų': 'u',
'ū': 'u', 'ž': 'z',
'Ą': 'A', 'Č': 'C', 'Ę': 'E', 'Ė': 'E', 'Į': 'I', 'Š': 'S', 'Ų': 'U',
'Ū': 'U', 'Ž': 'Z'
};
const SERBIAN_MAP = {
'ђ': 'dj', 'ј': 'j', 'љ': 'lj', 'њ': 'nj', 'ћ': 'c', 'џ': 'dz',
'đ': 'dj', 'Ђ': 'Dj', 'Ј': 'j', 'Љ': 'Lj', 'Њ': 'Nj', 'Ћ': 'C',
'Џ': 'Dz', 'Đ': 'Dj'
};
const AZERBAIJANI_MAP = {
'ç': 'c', 'ə': 'e', 'ğ': 'g', 'ı': 'i', 'ö': 'o', 'ş': 's', 'ü': 'u',
'Ç': 'C', 'Ə': 'E', 'Ğ': 'G', 'İ': 'I', 'Ö': 'O', 'Ş': 'S', 'Ü': 'U'
};
const GEORGIAN_MAP = {
'ა': 'a', 'ბ': 'b', 'გ': 'g', 'დ': 'd', 'ე': 'e', 'ვ': 'v', 'ზ': 'z',
'თ': 't', 'ი': 'i', 'კ': 'k', 'ლ': 'l', 'მ': 'm', 'ნ': 'n', 'ო': 'o',
'პ': 'p', 'ჟ': 'j', 'რ': 'r', 'ს': 's', 'ტ': 't', 'უ': 'u', 'ფ': 'f',
'ქ': 'q', 'ღ': 'g', 'ყ': 'y', 'შ': 'sh', 'ჩ': 'ch', 'ც': 'c', 'ძ': 'dz',
'წ': 'w', 'ჭ': 'ch', 'ხ': 'x', 'ჯ': 'j', 'ჰ': 'h'
};
const ALL_DOWNCODE_MAPS = [
LATIN_MAP,
LATIN_SYMBOLS_MAP,
GREEK_MAP,
TURKISH_MAP,
ROMANIAN_MAP,
RUSSIAN_MAP,
UKRAINIAN_MAP,
CZECH_MAP,
SLOVAK_MAP,
POLISH_MAP,
LATVIAN_MAP,
ARABIC_MAP,
LITHUANIAN_MAP,
SERBIAN_MAP,
AZERBAIJANI_MAP,
GEORGIAN_MAP
];
const Downcoder = {
'Initialize': function() {
if (Downcoder.map) { // already made
return;
}
Downcoder.map = {};
for (const lookup of ALL_DOWNCODE_MAPS) {
Object.assign(Downcoder.map, lookup);
}
Downcoder.regex = new RegExp(Object.keys(Downcoder.map).join('|'), 'g');
}
};
function downcode(slug) {
Downcoder.Initialize();
return slug.replace(Downcoder.regex, function(m) {
return Downcoder.map[m];
});
}
function URLify(s, num_chars, allowUnicode) {
// changes, e.g., "Petty theft" to "petty-theft"
if (!allowUnicode) {
s = downcode(s);
}
s = s.toLowerCase(); // convert to lowercase
// if downcode doesn't hit, the char will be stripped here
if (allowUnicode) {
// Keep Unicode letters including both lowercase and uppercase
// characters, whitespace, and dash; remove other characters.
s = XRegExp.replace(s, XRegExp('[^-_\\p{L}\\p{N}\\s]', 'g'), '');
} else {
s = s.replace(/[^-\w\s]/g, ''); // remove unneeded chars
}
s = s.replace(/^\s+|\s+$/g, ''); // trim leading/trailing spaces
s = s.replace(/[-\s]+/g, '-'); // convert spaces to hyphens
s = s.substring(0, num_chars); // trim to first num_chars chars
s = s.replace(/-+$/g, ''); // trim any trailing hyphens
return s;
}
window.URLify = URLify;
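    // Quick usage sketch (inputs are illustrative; expected outputs follow
    // from the transliteration maps and regexes above):
    //   URLify('Petty theft', 50, false);   // "petty-theft"
    //   URLify('Ćevapčići!', 50, false);    // "cevapcici"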
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/urlify.25cc3eac8123.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/urlify.25cc3eac8123.js",
"repo_id": "Django-locallibrary",
"token_count": 4853
} | 6 |
The MIT License (MIT)
Copyright (c) 2012-2017 Kevin Brown, Igor Vaynberg, and Select2 contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/vendor/select2/LICENSE.f94142512c91.md/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/vendor/select2/LICENSE.f94142512c91.md",
"repo_id": "Django-locallibrary",
"token_count": 287
} | 7 |
/*!
* Select2 4.0.13
* https://select2.github.io
*
* Released under the MIT license
* https://github.com/select2/select2/blob/master/LICENSE.md
*/
;(function (factory) {
if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['jquery'], factory);
} else if (typeof module === 'object' && module.exports) {
// Node/CommonJS
module.exports = function (root, jQuery) {
if (jQuery === undefined) {
// require('jQuery') returns a factory that requires window to
// build a jQuery instance, we normalize how we use modules
// that require this pattern but the window provided is a noop
// if it's defined (how jquery works)
if (typeof window !== 'undefined') {
jQuery = require('jquery');
}
else {
jQuery = require('jquery')(root);
}
}
factory(jQuery);
return jQuery;
};
} else {
// Browser globals
factory(jQuery);
}
} (function (jQuery) {
// This is needed so we can catch the AMD loader configuration and use it
// The inner file should be wrapped (by `banner.start.js`) in a function that
// returns the AMD loader references.
var S2 =(function () {
// Restore the Select2 AMD loader so it can be used
// Needed mostly in the language files, where the loader is not inserted
if (jQuery && jQuery.fn && jQuery.fn.select2 && jQuery.fn.select2.amd) {
var S2 = jQuery.fn.select2.amd;
}
var S2;(function () { if (!S2 || !S2.requirejs) {
if (!S2) { S2 = {}; } else { require = S2; }
/**
* @license almond 0.3.3 Copyright jQuery Foundation and other contributors.
* Released under MIT license, http://github.com/requirejs/almond/LICENSE
*/
//Going sloppy to avoid 'use strict' string cost, but strict practices should
//be followed.
/*global setTimeout: false */
var requirejs, require, define;
(function (undef) {
var main, req, makeMap, handlers,
defined = {},
waiting = {},
config = {},
defining = {},
hasOwn = Object.prototype.hasOwnProperty,
aps = [].slice,
jsSuffixRegExp = /\.js$/;
function hasProp(obj, prop) {
return hasOwn.call(obj, prop);
}
/**
* Given a relative module name, like ./something, normalize it to
* a real name that can be mapped to a path.
* @param {String} name the relative name
* @param {String} baseName a real name that the name arg is relative
* to.
* @returns {String} normalized name
*/
function normalize(name, baseName) {
var nameParts, nameSegment, mapValue, foundMap, lastIndex,
foundI, foundStarMap, starI, i, j, part, normalizedBaseParts,
baseParts = baseName && baseName.split("/"),
map = config.map,
starMap = (map && map['*']) || {};
//Adjust any relative paths.
if (name) {
name = name.split('/');
lastIndex = name.length - 1;
// If wanting node ID compatibility, strip .js from end
// of IDs. Have to do this here, and not in nameToUrl
// because node allows either .js or non .js to map
// to same file.
if (config.nodeIdCompat && jsSuffixRegExp.test(name[lastIndex])) {
name[lastIndex] = name[lastIndex].replace(jsSuffixRegExp, '');
}
// Starts with a '.' so need the baseName
if (name[0].charAt(0) === '.' && baseParts) {
//Convert baseName to array, and lop off the last part,
//so that . matches that 'directory' and not name of the baseName's
//module. For instance, baseName of 'one/two/three', maps to
//'one/two/three.js', but we want the directory, 'one/two' for
//this normalization.
normalizedBaseParts = baseParts.slice(0, baseParts.length - 1);
name = normalizedBaseParts.concat(name);
}
//start trimDots
for (i = 0; i < name.length; i++) {
part = name[i];
if (part === '.') {
name.splice(i, 1);
i -= 1;
} else if (part === '..') {
// If at the start, or previous value is still ..,
// keep them so that when converted to a path it may
// still work when converted to a path, even though
// as an ID it is less than ideal. In larger point
// releases, may be better to just kick out an error.
if (i === 0 || (i === 1 && name[2] === '..') || name[i - 1] === '..') {
continue;
} else if (i > 0) {
name.splice(i - 1, 2);
i -= 2;
}
}
}
//end trimDots
name = name.join('/');
}
//Apply map config if available.
if ((baseParts || starMap) && map) {
nameParts = name.split('/');
for (i = nameParts.length; i > 0; i -= 1) {
nameSegment = nameParts.slice(0, i).join("/");
if (baseParts) {
//Find the longest baseName segment match in the config.
//So, do joins on the biggest to smallest lengths of baseParts.
for (j = baseParts.length; j > 0; j -= 1) {
mapValue = map[baseParts.slice(0, j).join('/')];
//baseName segment has config, find if it has one for
//this name.
if (mapValue) {
mapValue = mapValue[nameSegment];
if (mapValue) {
//Match, update name to the new value.
foundMap = mapValue;
foundI = i;
break;
}
}
}
}
if (foundMap) {
break;
}
//Check for a star map match, but just hold on to it,
//if there is a shorter segment match later in a matching
//config, then favor over this star map.
if (!foundStarMap && starMap && starMap[nameSegment]) {
foundStarMap = starMap[nameSegment];
starI = i;
}
}
if (!foundMap && foundStarMap) {
foundMap = foundStarMap;
foundI = starI;
}
if (foundMap) {
nameParts.splice(0, foundI, foundMap);
name = nameParts.join('/');
}
}
return name;
}
function makeRequire(relName, forceSync) {
return function () {
//A version of a require function that passes a moduleName
//value for items that may need to
//look up paths relative to the moduleName
var args = aps.call(arguments, 0);
//If first arg is not require('string'), and there is only
//one arg, it is the array form without a callback. Insert
//a null so that the following concat is correct.
if (typeof args[0] !== 'string' && args.length === 1) {
args.push(null);
}
return req.apply(undef, args.concat([relName, forceSync]));
};
}
function makeNormalize(relName) {
return function (name) {
return normalize(name, relName);
};
}
function makeLoad(depName) {
return function (value) {
defined[depName] = value;
};
}
function callDep(name) {
if (hasProp(waiting, name)) {
var args = waiting[name];
delete waiting[name];
defining[name] = true;
main.apply(undef, args);
}
if (!hasProp(defined, name) && !hasProp(defining, name)) {
throw new Error('No ' + name);
}
return defined[name];
}
//Turns a plugin!resource to [plugin, resource]
//with the plugin being undefined if the name
//did not have a plugin prefix.
function splitPrefix(name) {
var prefix,
index = name ? name.indexOf('!') : -1;
if (index > -1) {
prefix = name.substring(0, index);
name = name.substring(index + 1, name.length);
}
return [prefix, name];
}
//Creates a parts array for a relName where first part is plugin ID,
//second part is resource ID. Assumes relName has already been normalized.
function makeRelParts(relName) {
return relName ? splitPrefix(relName) : [];
}
/**
* Makes a name map, normalizing the name, and using a plugin
* for normalization if necessary. Grabs a ref to plugin
* too, as an optimization.
*/
makeMap = function (name, relParts) {
var plugin,
parts = splitPrefix(name),
prefix = parts[0],
relResourceName = relParts[1];
name = parts[1];
if (prefix) {
prefix = normalize(prefix, relResourceName);
plugin = callDep(prefix);
}
//Normalize according
if (prefix) {
if (plugin && plugin.normalize) {
name = plugin.normalize(name, makeNormalize(relResourceName));
} else {
name = normalize(name, relResourceName);
}
} else {
name = normalize(name, relResourceName);
parts = splitPrefix(name);
prefix = parts[0];
name = parts[1];
if (prefix) {
plugin = callDep(prefix);
}
}
//Using ridiculous property names for space reasons
return {
f: prefix ? prefix + '!' + name : name, //fullName
n: name,
pr: prefix,
p: plugin
};
};
function makeConfig(name) {
return function () {
return (config && config.config && config.config[name]) || {};
};
}
handlers = {
require: function (name) {
return makeRequire(name);
},
exports: function (name) {
var e = defined[name];
if (typeof e !== 'undefined') {
return e;
} else {
return (defined[name] = {});
}
},
module: function (name) {
return {
id: name,
uri: '',
exports: defined[name],
config: makeConfig(name)
};
}
};
main = function (name, deps, callback, relName) {
var cjsModule, depName, ret, map, i, relParts,
args = [],
callbackType = typeof callback,
usingExports;
//Use name if no relName
relName = relName || name;
relParts = makeRelParts(relName);
//Call the callback to define the module, if necessary.
if (callbackType === 'undefined' || callbackType === 'function') {
//Pull out the defined dependencies and pass the ordered
//values to the callback.
//Default to [require, exports, module] if no deps
deps = !deps.length && callback.length ? ['require', 'exports', 'module'] : deps;
for (i = 0; i < deps.length; i += 1) {
map = makeMap(deps[i], relParts);
depName = map.f;
//Fast path CommonJS standard dependencies.
if (depName === "require") {
args[i] = handlers.require(name);
} else if (depName === "exports") {
//CommonJS module spec 1.1
args[i] = handlers.exports(name);
usingExports = true;
} else if (depName === "module") {
//CommonJS module spec 1.1
cjsModule = args[i] = handlers.module(name);
} else if (hasProp(defined, depName) ||
hasProp(waiting, depName) ||
hasProp(defining, depName)) {
args[i] = callDep(depName);
} else if (map.p) {
map.p.load(map.n, makeRequire(relName, true), makeLoad(depName), {});
args[i] = defined[depName];
} else {
throw new Error(name + ' missing ' + depName);
}
}
ret = callback ? callback.apply(defined[name], args) : undefined;
if (name) {
//If setting exports via "module" is in play,
//favor that over return value and exports. After that,
//favor a non-undefined return value over exports use.
if (cjsModule && cjsModule.exports !== undef &&
cjsModule.exports !== defined[name]) {
defined[name] = cjsModule.exports;
} else if (ret !== undef || !usingExports) {
//Use the return value from the function.
defined[name] = ret;
}
}
} else if (name) {
//May just be an object definition for the module. Only
//worry about defining if have a module name.
defined[name] = callback;
}
};
requirejs = require = req = function (deps, callback, relName, forceSync, alt) {
if (typeof deps === "string") {
if (handlers[deps]) {
//callback in this case is really relName
return handlers[deps](callback);
}
//Just return the module wanted. In this scenario, the
//deps arg is the module name, and second arg (if passed)
//is just the relName.
//Normalize module name, if it contains . or ..
return callDep(makeMap(deps, makeRelParts(callback)).f);
} else if (!deps.splice) {
//deps is a config object, not an array.
config = deps;
if (config.deps) {
req(config.deps, config.callback);
}
if (!callback) {
return;
}
if (callback.splice) {
//callback is an array, which means it is a dependency list.
//Adjust args if there are dependencies
deps = callback;
callback = relName;
relName = null;
} else {
deps = undef;
}
}
//Support require(['a'])
callback = callback || function () {};
//If relName is a function, it is an errback handler,
//so remove it.
if (typeof relName === 'function') {
relName = forceSync;
forceSync = alt;
}
//Simulate async callback;
if (forceSync) {
main(undef, deps, callback, relName);
} else {
//Using a non-zero value because of concern for what old browsers
//do, and latest browsers "upgrade" to 4 if lower value is used:
//http://www.whatwg.org/specs/web-apps/current-work/multipage/timers.html#dom-windowtimers-settimeout:
//If want a value immediately, use require('id') instead -- something
//that works in almond on the global level, but not guaranteed and
//unlikely to work in other AMD implementations.
setTimeout(function () {
main(undef, deps, callback, relName);
}, 4);
}
return req;
};
/**
* Just drops the config on the floor, but returns req in case
* the config return value is used.
*/
req.config = function (cfg) {
return req(cfg);
};
/**
* Expose module registry for debugging and tooling
*/
requirejs._defined = defined;
define = function (name, deps, callback) {
if (typeof name !== 'string') {
throw new Error('See almond README: incorrect module build, no module name');
}
//This module may not have dependencies
if (!deps.splice) {
//deps is not an array, so probably means
//an object literal or factory function for
//the value. Adjust args.
callback = deps;
deps = [];
}
if (!hasProp(defined, name) && !hasProp(waiting, name)) {
waiting[name] = [name, deps, callback];
}
};
define.amd = {
jQuery: true
};
}());
S2.requirejs = requirejs;S2.require = require;S2.define = define;
}
}());
S2.define("almond", function(){});
/* global jQuery:false, $:false */
S2.define('jquery',[],function () {
var _$ = jQuery || $;
if (_$ == null && console && console.error) {
console.error(
'Select2: An instance of jQuery or a jQuery-compatible library was not ' +
'found. Make sure that you are including jQuery before Select2 on your ' +
'web page.'
);
}
return _$;
});
S2.define('select2/utils',[
'jquery'
], function ($) {
var Utils = {};
Utils.Extend = function (ChildClass, SuperClass) {
var __hasProp = {}.hasOwnProperty;
function BaseConstructor () {
this.constructor = ChildClass;
}
for (var key in SuperClass) {
if (__hasProp.call(SuperClass, key)) {
ChildClass[key] = SuperClass[key];
}
}
BaseConstructor.prototype = SuperClass.prototype;
ChildClass.prototype = new BaseConstructor();
ChildClass.__super__ = SuperClass.prototype;
return ChildClass;
};
function getMethods (theClass) {
var proto = theClass.prototype;
var methods = [];
for (var methodName in proto) {
var m = proto[methodName];
if (typeof m !== 'function') {
continue;
}
if (methodName === 'constructor') {
continue;
}
methods.push(methodName);
}
return methods;
}
Utils.Decorate = function (SuperClass, DecoratorClass) {
var decoratedMethods = getMethods(DecoratorClass);
var superMethods = getMethods(SuperClass);
function DecoratedClass () {
var unshift = Array.prototype.unshift;
var argCount = DecoratorClass.prototype.constructor.length;
var calledConstructor = SuperClass.prototype.constructor;
if (argCount > 0) {
unshift.call(arguments, SuperClass.prototype.constructor);
calledConstructor = DecoratorClass.prototype.constructor;
}
calledConstructor.apply(this, arguments);
}
DecoratorClass.displayName = SuperClass.displayName;
function ctr () {
this.constructor = DecoratedClass;
}
DecoratedClass.prototype = new ctr();
for (var m = 0; m < superMethods.length; m++) {
var superMethod = superMethods[m];
DecoratedClass.prototype[superMethod] =
SuperClass.prototype[superMethod];
}
var calledMethod = function (methodName) {
// Stub out the original method if it's not decorating an actual method
var originalMethod = function () {};
if (methodName in DecoratedClass.prototype) {
originalMethod = DecoratedClass.prototype[methodName];
}
var decoratedMethod = DecoratorClass.prototype[methodName];
return function () {
var unshift = Array.prototype.unshift;
unshift.call(arguments, originalMethod);
return decoratedMethod.apply(this, arguments);
};
};
for (var d = 0; d < decoratedMethods.length; d++) {
var decoratedMethod = decoratedMethods[d];
DecoratedClass.prototype[decoratedMethod] = calledMethod(decoratedMethod);
}
return DecoratedClass;
};
var Observable = function () {
this.listeners = {};
};
Observable.prototype.on = function (event, callback) {
this.listeners = this.listeners || {};
if (event in this.listeners) {
this.listeners[event].push(callback);
} else {
this.listeners[event] = [callback];
}
};
Observable.prototype.trigger = function (event) {
var slice = Array.prototype.slice;
var params = slice.call(arguments, 1);
this.listeners = this.listeners || {};
// Params should always come in as an array
if (params == null) {
params = [];
}
// If there are no arguments to the event, use a temporary object
if (params.length === 0) {
params.push({});
}
// Set the `_type` of the first object to the event
params[0]._type = event;
if (event in this.listeners) {
this.invoke(this.listeners[event], slice.call(arguments, 1));
}
if ('*' in this.listeners) {
this.invoke(this.listeners['*'], arguments);
}
};
Observable.prototype.invoke = function (listeners, params) {
for (var i = 0, len = listeners.length; i < len; i++) {
listeners[i].apply(this, params);
}
};
Utils.Observable = Observable;
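  // Minimal sketch of the observer API defined above (the event name and
  // payload are illustrative):
  //   var obs = new Utils.Observable();
  //   obs.on('change', function (params) { /* params._type === 'change' */ });
  //   obs.trigger('change', { value: 1 });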
Utils.generateChars = function (length) {
var chars = '';
for (var i = 0; i < length; i++) {
var randomChar = Math.floor(Math.random() * 36);
chars += randomChar.toString(36);
}
return chars;
};
Utils.bind = function (func, context) {
return function () {
func.apply(context, arguments);
};
};
Utils._convertData = function (data) {
for (var originalKey in data) {
var keys = originalKey.split('-');
var dataLevel = data;
if (keys.length === 1) {
continue;
}
for (var k = 0; k < keys.length; k++) {
var key = keys[k];
// Lowercase the first letter
// By default, dash-separated becomes camelCase
key = key.substring(0, 1).toLowerCase() + key.substring(1);
if (!(key in dataLevel)) {
dataLevel[key] = {};
}
if (k == keys.length - 1) {
dataLevel[key] = data[originalKey];
}
dataLevel = dataLevel[key];
}
delete data[originalKey];
}
return data;
};
Utils.hasScroll = function (index, el) {
// Adapted from the function created by @ShadowScripter
// and adapted by @BillBarry on the Stack Exchange Code Review website.
// The original code can be found at
// http://codereview.stackexchange.com/q/13338
// and was designed to be used with the Sizzle selector engine.
var $el = $(el);
var overflowX = el.style.overflowX;
var overflowY = el.style.overflowY;
//Check both x and y declarations
if (overflowX === overflowY &&
(overflowY === 'hidden' || overflowY === 'visible')) {
return false;
}
if (overflowX === 'scroll' || overflowY === 'scroll') {
return true;
}
return ($el.innerHeight() < el.scrollHeight ||
$el.innerWidth() < el.scrollWidth);
};
Utils.escapeMarkup = function (markup) {
var replaceMap = {
      '\\': '&#92;',
      '&': '&amp;',
      '<': '&lt;',
      '>': '&gt;',
      '"': '&quot;',
      '\'': '&#39;',
      '/': '&#47;'
};
// Do not try to escape the markup if it's not a string
if (typeof markup !== 'string') {
return markup;
}
return String(markup).replace(/[&<>"'\/\\]/g, function (match) {
return replaceMap[match];
});
};
// Append an array of jQuery nodes to a given element.
Utils.appendMany = function ($element, $nodes) {
// jQuery 1.7.x does not support $.fn.append() with an array
// Fall back to a jQuery object collection using $.fn.add()
if ($.fn.jquery.substr(0, 3) === '1.7') {
var $jqNodes = $();
$.map($nodes, function (node) {
$jqNodes = $jqNodes.add(node);
});
$nodes = $jqNodes;
}
$element.append($nodes);
};
// Cache objects in Utils.__cache instead of $.data (see #4346)
Utils.__cache = {};
var id = 0;
Utils.GetUniqueElementId = function (element) {
// Get a unique element Id. If element has no id,
// creates a new unique number, stores it in the id
// attribute and returns the new id.
// If an id already exists, it simply returns it.
var select2Id = element.getAttribute('data-select2-id');
if (select2Id == null) {
// If element has id, use it.
if (element.id) {
select2Id = element.id;
element.setAttribute('data-select2-id', select2Id);
} else {
element.setAttribute('data-select2-id', ++id);
select2Id = id.toString();
}
}
return select2Id;
};
Utils.StoreData = function (element, name, value) {
// Stores an item in the cache for a specified element.
// name is the cache key.
var id = Utils.GetUniqueElementId(element);
if (!Utils.__cache[id]) {
Utils.__cache[id] = {};
}
Utils.__cache[id][name] = value;
};
Utils.GetData = function (element, name) {
    // Retrieves a value from the cache by its key (name).
    // name is optional. If no name is specified, return
    // all cache items for the specified element.
var id = Utils.GetUniqueElementId(element);
if (name) {
if (Utils.__cache[id]) {
if (Utils.__cache[id][name] != null) {
return Utils.__cache[id][name];
}
return $(element).data(name); // Fallback to HTML5 data attribs.
}
return $(element).data(name); // Fallback to HTML5 data attribs.
} else {
return Utils.__cache[id];
}
};
Utils.RemoveData = function (element) {
// Removes all cached items for a specified element.
var id = Utils.GetUniqueElementId(element);
if (Utils.__cache[id] != null) {
delete Utils.__cache[id];
}
element.removeAttribute('data-select2-id');
};
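  // Sketch of how this element cache is typically used (the key and value
  // are illustrative):
  //   Utils.StoreData(el, 'data', item);  // cache an item for a DOM element
  //   Utils.GetData(el, 'data');          // read it back; omit the name for all items
  //   Utils.RemoveData(el);               // drop the element's cached entries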
return Utils;
});
S2.define('select2/results',[
'jquery',
'./utils'
], function ($, Utils) {
function Results ($element, options, dataAdapter) {
this.$element = $element;
this.data = dataAdapter;
this.options = options;
Results.__super__.constructor.call(this);
}
Utils.Extend(Results, Utils.Observable);
Results.prototype.render = function () {
var $results = $(
'<ul class="select2-results__options" role="listbox"></ul>'
);
if (this.options.get('multiple')) {
$results.attr('aria-multiselectable', 'true');
}
this.$results = $results;
return $results;
};
Results.prototype.clear = function () {
this.$results.empty();
};
Results.prototype.displayMessage = function (params) {
var escapeMarkup = this.options.get('escapeMarkup');
this.clear();
this.hideLoading();
var $message = $(
'<li role="alert" aria-live="assertive"' +
' class="select2-results__option"></li>'
);
var message = this.options.get('translations').get(params.message);
$message.append(
escapeMarkup(
message(params.args)
)
);
$message[0].className += ' select2-results__message';
this.$results.append($message);
};
Results.prototype.hideMessages = function () {
this.$results.find('.select2-results__message').remove();
};
Results.prototype.append = function (data) {
this.hideLoading();
var $options = [];
if (data.results == null || data.results.length === 0) {
if (this.$results.children().length === 0) {
this.trigger('results:message', {
message: 'noResults'
});
}
return;
}
data.results = this.sort(data.results);
for (var d = 0; d < data.results.length; d++) {
var item = data.results[d];
var $option = this.option(item);
$options.push($option);
}
this.$results.append($options);
};
Results.prototype.position = function ($results, $dropdown) {
var $resultsContainer = $dropdown.find('.select2-results');
$resultsContainer.append($results);
};
Results.prototype.sort = function (data) {
var sorter = this.options.get('sorter');
return sorter(data);
};
Results.prototype.highlightFirstItem = function () {
var $options = this.$results
.find('.select2-results__option[aria-selected]');
var $selected = $options.filter('[aria-selected=true]');
// Check if there are any selected options
if ($selected.length > 0) {
// If there are selected options, highlight the first
$selected.first().trigger('mouseenter');
} else {
// If there are no selected options, highlight the first option
// in the dropdown
$options.first().trigger('mouseenter');
}
this.ensureHighlightVisible();
};
Results.prototype.setClasses = function () {
var self = this;
this.data.current(function (selected) {
var selectedIds = $.map(selected, function (s) {
return s.id.toString();
});
var $options = self.$results
.find('.select2-results__option[aria-selected]');
$options.each(function () {
var $option = $(this);
var item = Utils.GetData(this, 'data');
// id needs to be converted to a string when comparing
var id = '' + item.id;
if ((item.element != null && item.element.selected) ||
(item.element == null && $.inArray(id, selectedIds) > -1)) {
$option.attr('aria-selected', 'true');
} else {
$option.attr('aria-selected', 'false');
}
});
});
};
Results.prototype.showLoading = function (params) {
this.hideLoading();
var loadingMore = this.options.get('translations').get('searching');
var loading = {
disabled: true,
loading: true,
text: loadingMore(params)
};
var $loading = this.option(loading);
$loading.className += ' loading-results';
this.$results.prepend($loading);
};
Results.prototype.hideLoading = function () {
this.$results.find('.loading-results').remove();
};
Results.prototype.option = function (data) {
var option = document.createElement('li');
option.className = 'select2-results__option';
var attrs = {
'role': 'option',
'aria-selected': 'false'
};
var matches = window.Element.prototype.matches ||
window.Element.prototype.msMatchesSelector ||
window.Element.prototype.webkitMatchesSelector;
if ((data.element != null && matches.call(data.element, ':disabled')) ||
(data.element == null && data.disabled)) {
delete attrs['aria-selected'];
attrs['aria-disabled'] = 'true';
}
if (data.id == null) {
delete attrs['aria-selected'];
}
if (data._resultId != null) {
option.id = data._resultId;
}
if (data.title) {
option.title = data.title;
}
if (data.children) {
attrs.role = 'group';
attrs['aria-label'] = data.text;
delete attrs['aria-selected'];
}
for (var attr in attrs) {
var val = attrs[attr];
option.setAttribute(attr, val);
}
if (data.children) {
var $option = $(option);
var label = document.createElement('strong');
label.className = 'select2-results__group';
var $label = $(label);
this.template(data, label);
var $children = [];
for (var c = 0; c < data.children.length; c++) {
var child = data.children[c];
var $child = this.option(child);
$children.push($child);
}
var $childrenContainer = $('<ul></ul>', {
'class': 'select2-results__options select2-results__options--nested'
});
$childrenContainer.append($children);
$option.append(label);
$option.append($childrenContainer);
} else {
this.template(data, option);
}
Utils.StoreData(option, 'data', data);
return option;
};
Results.prototype.bind = function (container, $container) {
var self = this;
var id = container.id + '-results';
this.$results.attr('id', id);
container.on('results:all', function (params) {
self.clear();
self.append(params.data);
if (container.isOpen()) {
self.setClasses();
self.highlightFirstItem();
}
});
container.on('results:append', function (params) {
self.append(params.data);
if (container.isOpen()) {
self.setClasses();
}
});
container.on('query', function (params) {
self.hideMessages();
self.showLoading(params);
});
container.on('select', function () {
if (!container.isOpen()) {
return;
}
self.setClasses();
if (self.options.get('scrollAfterSelect')) {
self.highlightFirstItem();
}
});
container.on('unselect', function () {
if (!container.isOpen()) {
return;
}
self.setClasses();
if (self.options.get('scrollAfterSelect')) {
self.highlightFirstItem();
}
});
container.on('open', function () {
      // When the dropdown is open, aria-expanded="true"
self.$results.attr('aria-expanded', 'true');
self.$results.attr('aria-hidden', 'false');
self.setClasses();
self.ensureHighlightVisible();
});
container.on('close', function () {
      // When the dropdown is closed, aria-expanded="false"
self.$results.attr('aria-expanded', 'false');
self.$results.attr('aria-hidden', 'true');
self.$results.removeAttr('aria-activedescendant');
});
container.on('results:toggle', function () {
var $highlighted = self.getHighlightedResults();
if ($highlighted.length === 0) {
return;
}
$highlighted.trigger('mouseup');
});
container.on('results:select', function () {
var $highlighted = self.getHighlightedResults();
if ($highlighted.length === 0) {
return;
}
var data = Utils.GetData($highlighted[0], 'data');
if ($highlighted.attr('aria-selected') == 'true') {
self.trigger('close', {});
} else {
self.trigger('select', {
data: data
});
}
});
container.on('results:previous', function () {
var $highlighted = self.getHighlightedResults();
var $options = self.$results.find('[aria-selected]');
var currentIndex = $options.index($highlighted);
// If we are already at the top, don't move further
// If no options, currentIndex will be -1
if (currentIndex <= 0) {
return;
}
var nextIndex = currentIndex - 1;
// If none are highlighted, highlight the first
if ($highlighted.length === 0) {
nextIndex = 0;
}
var $next = $options.eq(nextIndex);
$next.trigger('mouseenter');
var currentOffset = self.$results.offset().top;
var nextTop = $next.offset().top;
var nextOffset = self.$results.scrollTop() + (nextTop - currentOffset);
if (nextIndex === 0) {
self.$results.scrollTop(0);
} else if (nextTop - currentOffset < 0) {
self.$results.scrollTop(nextOffset);
}
});
container.on('results:next', function () {
var $highlighted = self.getHighlightedResults();
var $options = self.$results.find('[aria-selected]');
var currentIndex = $options.index($highlighted);
var nextIndex = currentIndex + 1;
// If we are at the last option, stay there
if (nextIndex >= $options.length) {
return;
}
var $next = $options.eq(nextIndex);
$next.trigger('mouseenter');
var currentOffset = self.$results.offset().top +
self.$results.outerHeight(false);
var nextBottom = $next.offset().top + $next.outerHeight(false);
var nextOffset = self.$results.scrollTop() + nextBottom - currentOffset;
if (nextIndex === 0) {
self.$results.scrollTop(0);
} else if (nextBottom > currentOffset) {
self.$results.scrollTop(nextOffset);
}
});
container.on('results:focus', function (params) {
params.element.addClass('select2-results__option--highlighted');
});
container.on('results:message', function (params) {
self.displayMessage(params);
});
if ($.fn.mousewheel) {
this.$results.on('mousewheel', function (e) {
var top = self.$results.scrollTop();
var bottom = self.$results.get(0).scrollHeight - top + e.deltaY;
var isAtTop = e.deltaY > 0 && top - e.deltaY <= 0;
var isAtBottom = e.deltaY < 0 && bottom <= self.$results.height();
if (isAtTop) {
self.$results.scrollTop(0);
e.preventDefault();
e.stopPropagation();
} else if (isAtBottom) {
self.$results.scrollTop(
self.$results.get(0).scrollHeight - self.$results.height()
);
e.preventDefault();
e.stopPropagation();
}
});
}
this.$results.on('mouseup', '.select2-results__option[aria-selected]',
function (evt) {
var $this = $(this);
var data = Utils.GetData(this, 'data');
if ($this.attr('aria-selected') === 'true') {
if (self.options.get('multiple')) {
self.trigger('unselect', {
originalEvent: evt,
data: data
});
} else {
self.trigger('close', {});
}
return;
}
self.trigger('select', {
originalEvent: evt,
data: data
});
});
this.$results.on('mouseenter', '.select2-results__option[aria-selected]',
function (evt) {
var data = Utils.GetData(this, 'data');
self.getHighlightedResults()
.removeClass('select2-results__option--highlighted');
self.trigger('results:focus', {
data: data,
element: $(this)
});
});
};
Results.prototype.getHighlightedResults = function () {
var $highlighted = this.$results
.find('.select2-results__option--highlighted');
return $highlighted;
};
Results.prototype.destroy = function () {
this.$results.remove();
};
Results.prototype.ensureHighlightVisible = function () {
var $highlighted = this.getHighlightedResults();
if ($highlighted.length === 0) {
return;
}
var $options = this.$results.find('[aria-selected]');
var currentIndex = $options.index($highlighted);
var currentOffset = this.$results.offset().top;
var nextTop = $highlighted.offset().top;
var nextOffset = this.$results.scrollTop() + (nextTop - currentOffset);
var offsetDelta = nextTop - currentOffset;
nextOffset -= $highlighted.outerHeight(false) * 2;
if (currentIndex <= 2) {
this.$results.scrollTop(0);
} else if (offsetDelta > this.$results.outerHeight() || offsetDelta < 0) {
this.$results.scrollTop(nextOffset);
}
};
Results.prototype.template = function (result, container) {
var template = this.options.get('templateResult');
var escapeMarkup = this.options.get('escapeMarkup');
var content = template(result, container);
if (content == null) {
container.style.display = 'none';
} else if (typeof content === 'string') {
container.innerHTML = escapeMarkup(content);
} else {
$(container).append(content);
}
};
return Results;
});
S2.define('select2/keys',[
], function () {
var KEYS = {
BACKSPACE: 8,
TAB: 9,
ENTER: 13,
SHIFT: 16,
CTRL: 17,
ALT: 18,
ESC: 27,
SPACE: 32,
PAGE_UP: 33,
PAGE_DOWN: 34,
END: 35,
HOME: 36,
LEFT: 37,
UP: 38,
RIGHT: 39,
DOWN: 40,
DELETE: 46
};
return KEYS;
});
S2.define('select2/selection/base',[
'jquery',
'../utils',
'../keys'
], function ($, Utils, KEYS) {
function BaseSelection ($element, options) {
this.$element = $element;
this.options = options;
BaseSelection.__super__.constructor.call(this);
}
Utils.Extend(BaseSelection, Utils.Observable);
BaseSelection.prototype.render = function () {
var $selection = $(
'<span class="select2-selection" role="combobox" ' +
' aria-haspopup="true" aria-expanded="false">' +
'</span>'
);
this._tabindex = 0;
if (Utils.GetData(this.$element[0], 'old-tabindex') != null) {
this._tabindex = Utils.GetData(this.$element[0], 'old-tabindex');
} else if (this.$element.attr('tabindex') != null) {
this._tabindex = this.$element.attr('tabindex');
}
$selection.attr('title', this.$element.attr('title'));
$selection.attr('tabindex', this._tabindex);
$selection.attr('aria-disabled', 'false');
this.$selection = $selection;
return $selection;
};
BaseSelection.prototype.bind = function (container, $container) {
var self = this;
var resultsId = container.id + '-results';
this.container = container;
this.$selection.on('focus', function (evt) {
self.trigger('focus', evt);
});
this.$selection.on('blur', function (evt) {
self._handleBlur(evt);
});
this.$selection.on('keydown', function (evt) {
self.trigger('keypress', evt);
if (evt.which === KEYS.SPACE) {
evt.preventDefault();
}
});
container.on('results:focus', function (params) {
self.$selection.attr('aria-activedescendant', params.data._resultId);
});
container.on('selection:update', function (params) {
self.update(params.data);
});
container.on('open', function () {
// When the dropdown is open, aria-expanded="true"
self.$selection.attr('aria-expanded', 'true');
self.$selection.attr('aria-owns', resultsId);
self._attachCloseHandler(container);
});
container.on('close', function () {
// When the dropdown is closed, aria-expanded="false"
self.$selection.attr('aria-expanded', 'false');
self.$selection.removeAttr('aria-activedescendant');
self.$selection.removeAttr('aria-owns');
self.$selection.trigger('focus');
self._detachCloseHandler(container);
});
container.on('enable', function () {
self.$selection.attr('tabindex', self._tabindex);
self.$selection.attr('aria-disabled', 'false');
});
container.on('disable', function () {
self.$selection.attr('tabindex', '-1');
self.$selection.attr('aria-disabled', 'true');
});
};
BaseSelection.prototype._handleBlur = function (evt) {
var self = this;
// This needs to be delayed as the active element is the body when the tab
// key is pressed, possibly along with others.
window.setTimeout(function () {
// Don't trigger `blur` if the focus is still in the selection
if (
(document.activeElement == self.$selection[0]) ||
($.contains(self.$selection[0], document.activeElement))
) {
return;
}
self.trigger('blur', evt);
}, 1);
};
BaseSelection.prototype._attachCloseHandler = function (container) {
$(document.body).on('mousedown.select2.' + container.id, function (e) {
var $target = $(e.target);
var $select = $target.closest('.select2');
var $all = $('.select2.select2-container--open');
$all.each(function () {
if (this == $select[0]) {
return;
}
var $element = Utils.GetData(this, 'element');
$element.select2('close');
});
});
};
BaseSelection.prototype._detachCloseHandler = function (container) {
$(document.body).off('mousedown.select2.' + container.id);
};
BaseSelection.prototype.position = function ($selection, $container) {
var $selectionContainer = $container.find('.selection');
$selectionContainer.append($selection);
};
BaseSelection.prototype.destroy = function () {
this._detachCloseHandler(this.container);
};
BaseSelection.prototype.update = function (data) {
throw new Error('The `update` method must be defined in child classes.');
};
/**
* Helper method to abstract the "enabled" (not "disabled") state of this
* object.
*
* @return {true} if the instance is not disabled.
* @return {false} if the instance is disabled.
*/
BaseSelection.prototype.isEnabled = function () {
return !this.isDisabled();
};
/**
* Helper method to abstract the "disabled" state of this object.
*
* @return {true} if the disabled option is true.
* @return {false} if the disabled option is false.
*/
BaseSelection.prototype.isDisabled = function () {
return this.options.get('disabled');
};
return BaseSelection;
});
S2.define('select2/selection/single',[
'jquery',
'./base',
'../utils',
'../keys'
], function ($, BaseSelection, Utils, KEYS) {
function SingleSelection () {
SingleSelection.__super__.constructor.apply(this, arguments);
}
Utils.Extend(SingleSelection, BaseSelection);
SingleSelection.prototype.render = function () {
var $selection = SingleSelection.__super__.render.call(this);
$selection.addClass('select2-selection--single');
$selection.html(
'<span class="select2-selection__rendered"></span>' +
'<span class="select2-selection__arrow" role="presentation">' +
'<b role="presentation"></b>' +
'</span>'
);
return $selection;
};
SingleSelection.prototype.bind = function (container, $container) {
var self = this;
SingleSelection.__super__.bind.apply(this, arguments);
var id = container.id + '-container';
this.$selection.find('.select2-selection__rendered')
.attr('id', id)
.attr('role', 'textbox')
.attr('aria-readonly', 'true');
this.$selection.attr('aria-labelledby', id);
this.$selection.on('mousedown', function (evt) {
// Only respond to left clicks
if (evt.which !== 1) {
return;
}
self.trigger('toggle', {
originalEvent: evt
});
});
this.$selection.on('focus', function (evt) {
// User focuses on the container
});
this.$selection.on('blur', function (evt) {
// User exits the container
});
container.on('focus', function (evt) {
if (!container.isOpen()) {
self.$selection.trigger('focus');
}
});
};
SingleSelection.prototype.clear = function () {
var $rendered = this.$selection.find('.select2-selection__rendered');
$rendered.empty();
$rendered.removeAttr('title'); // clear tooltip on empty
};
SingleSelection.prototype.display = function (data, container) {
var template = this.options.get('templateSelection');
var escapeMarkup = this.options.get('escapeMarkup');
return escapeMarkup(template(data, container));
};
SingleSelection.prototype.selectionContainer = function () {
return $('<span></span>');
};
SingleSelection.prototype.update = function (data) {
if (data.length === 0) {
this.clear();
return;
}
var selection = data[0];
var $rendered = this.$selection.find('.select2-selection__rendered');
var formatted = this.display(selection, $rendered);
$rendered.empty().append(formatted);
var title = selection.title || selection.text;
if (title) {
$rendered.attr('title', title);
} else {
$rendered.removeAttr('title');
}
};
return SingleSelection;
});
S2.define('select2/selection/multiple',[
'jquery',
'./base',
'../utils'
], function ($, BaseSelection, Utils) {
function MultipleSelection ($element, options) {
MultipleSelection.__super__.constructor.apply(this, arguments);
}
Utils.Extend(MultipleSelection, BaseSelection);
MultipleSelection.prototype.render = function () {
var $selection = MultipleSelection.__super__.render.call(this);
$selection.addClass('select2-selection--multiple');
$selection.html(
'<ul class="select2-selection__rendered"></ul>'
);
return $selection;
};
MultipleSelection.prototype.bind = function (container, $container) {
var self = this;
MultipleSelection.__super__.bind.apply(this, arguments);
this.$selection.on('click', function (evt) {
self.trigger('toggle', {
originalEvent: evt
});
});
this.$selection.on(
'click',
'.select2-selection__choice__remove',
function (evt) {
// Ignore the event if it is disabled
if (self.isDisabled()) {
return;
}
var $remove = $(this);
var $selection = $remove.parent();
var data = Utils.GetData($selection[0], 'data');
self.trigger('unselect', {
originalEvent: evt,
data: data
});
}
);
};
MultipleSelection.prototype.clear = function () {
var $rendered = this.$selection.find('.select2-selection__rendered');
$rendered.empty();
$rendered.removeAttr('title');
};
MultipleSelection.prototype.display = function (data, container) {
var template = this.options.get('templateSelection');
var escapeMarkup = this.options.get('escapeMarkup');
return escapeMarkup(template(data, container));
};
MultipleSelection.prototype.selectionContainer = function () {
var $container = $(
'<li class="select2-selection__choice">' +
'<span class="select2-selection__choice__remove" role="presentation">' +
'×' +
'</span>' +
'</li>'
);
return $container;
};
MultipleSelection.prototype.update = function (data) {
this.clear();
if (data.length === 0) {
return;
}
var $selections = [];
for (var d = 0; d < data.length; d++) {
var selection = data[d];
var $selection = this.selectionContainer();
var formatted = this.display(selection, $selection);
$selection.append(formatted);
var title = selection.title || selection.text;
if (title) {
$selection.attr('title', title);
}
Utils.StoreData($selection[0], 'data', selection);
$selections.push($selection);
}
var $rendered = this.$selection.find('.select2-selection__rendered');
Utils.appendMany($rendered, $selections);
};
return MultipleSelection;
});
S2.define('select2/selection/placeholder',[
'../utils'
], function (Utils) {
function Placeholder (decorated, $element, options) {
this.placeholder = this.normalizePlaceholder(options.get('placeholder'));
decorated.call(this, $element, options);
}
Placeholder.prototype.normalizePlaceholder = function (_, placeholder) {
if (typeof placeholder === 'string') {
placeholder = {
id: '',
text: placeholder
};
}
return placeholder;
};
Placeholder.prototype.createPlaceholder = function (decorated, placeholder) {
var $placeholder = this.selectionContainer();
$placeholder.html(this.display(placeholder));
$placeholder.addClass('select2-selection__placeholder')
.removeClass('select2-selection__choice');
return $placeholder;
};
Placeholder.prototype.update = function (decorated, data) {
var singlePlaceholder = (
data.length == 1 && data[0].id != this.placeholder.id
);
var multipleSelections = data.length > 1;
if (multipleSelections || singlePlaceholder) {
return decorated.call(this, data);
}
this.clear();
var $placeholder = this.createPlaceholder(this.placeholder);
this.$selection.find('.select2-selection__rendered').append($placeholder);
};
return Placeholder;
});
S2.define('select2/selection/allowClear',[
'jquery',
'../keys',
'../utils'
], function ($, KEYS, Utils) {
function AllowClear () { }
AllowClear.prototype.bind = function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
if (this.placeholder == null) {
if (this.options.get('debug') && window.console && console.error) {
console.error(
'Select2: The `allowClear` option should be used in combination ' +
'with the `placeholder` option.'
);
}
}
this.$selection.on('mousedown', '.select2-selection__clear',
function (evt) {
self._handleClear(evt);
});
container.on('keypress', function (evt) {
self._handleKeyboardClear(evt, container);
});
};
AllowClear.prototype._handleClear = function (_, evt) {
// Ignore the event if it is disabled
if (this.isDisabled()) {
return;
}
var $clear = this.$selection.find('.select2-selection__clear');
// Ignore the event if nothing has been selected
if ($clear.length === 0) {
return;
}
evt.stopPropagation();
var data = Utils.GetData($clear[0], 'data');
var previousVal = this.$element.val();
this.$element.val(this.placeholder.id);
var unselectData = {
data: data
};
this.trigger('clear', unselectData);
if (unselectData.prevented) {
this.$element.val(previousVal);
return;
}
for (var d = 0; d < data.length; d++) {
unselectData = {
data: data[d]
};
// Trigger the `unselect` event, so people can prevent it from being
// cleared.
this.trigger('unselect', unselectData);
// If the event was prevented, don't clear it out.
if (unselectData.prevented) {
this.$element.val(previousVal);
return;
}
}
this.$element.trigger('input').trigger('change');
this.trigger('toggle', {});
};
AllowClear.prototype._handleKeyboardClear = function (_, evt, container) {
if (container.isOpen()) {
return;
}
if (evt.which == KEYS.DELETE || evt.which == KEYS.BACKSPACE) {
this._handleClear(evt);
}
};
AllowClear.prototype.update = function (decorated, data) {
decorated.call(this, data);
if (this.$selection.find('.select2-selection__placeholder').length > 0 ||
data.length === 0) {
return;
}
var removeAll = this.options.get('translations').get('removeAllItems');
var $remove = $(
'<span class="select2-selection__clear" title="' + removeAll() +'">' +
'×' +
'</span>'
);
Utils.StoreData($remove[0], 'data', data);
this.$selection.find('.select2-selection__rendered').prepend($remove);
};
return AllowClear;
});
S2.define('select2/selection/search',[
'jquery',
'../utils',
'../keys'
], function ($, Utils, KEYS) {
function Search (decorated, $element, options) {
decorated.call(this, $element, options);
}
Search.prototype.render = function (decorated) {
var $search = $(
'<li class="select2-search select2-search--inline">' +
'<input class="select2-search__field" type="search" tabindex="-1"' +
' autocomplete="off" autocorrect="off" autocapitalize="none"' +
' spellcheck="false" role="searchbox" aria-autocomplete="list" />' +
'</li>'
);
this.$searchContainer = $search;
this.$search = $search.find('input');
var $rendered = decorated.call(this);
this._transferTabIndex();
return $rendered;
};
Search.prototype.bind = function (decorated, container, $container) {
var self = this;
var resultsId = container.id + '-results';
decorated.call(this, container, $container);
container.on('open', function () {
self.$search.attr('aria-controls', resultsId);
self.$search.trigger('focus');
});
container.on('close', function () {
self.$search.val('');
self.$search.removeAttr('aria-controls');
self.$search.removeAttr('aria-activedescendant');
self.$search.trigger('focus');
});
container.on('enable', function () {
self.$search.prop('disabled', false);
self._transferTabIndex();
});
container.on('disable', function () {
self.$search.prop('disabled', true);
});
container.on('focus', function (evt) {
self.$search.trigger('focus');
});
container.on('results:focus', function (params) {
if (params.data._resultId) {
self.$search.attr('aria-activedescendant', params.data._resultId);
} else {
self.$search.removeAttr('aria-activedescendant');
}
});
this.$selection.on('focusin', '.select2-search--inline', function (evt) {
self.trigger('focus', evt);
});
this.$selection.on('focusout', '.select2-search--inline', function (evt) {
self._handleBlur(evt);
});
this.$selection.on('keydown', '.select2-search--inline', function (evt) {
evt.stopPropagation();
self.trigger('keypress', evt);
self._keyUpPrevented = evt.isDefaultPrevented();
var key = evt.which;
if (key === KEYS.BACKSPACE && self.$search.val() === '') {
var $previousChoice = self.$searchContainer
.prev('.select2-selection__choice');
if ($previousChoice.length > 0) {
var item = Utils.GetData($previousChoice[0], 'data');
self.searchRemoveChoice(item);
evt.preventDefault();
}
}
});
this.$selection.on('click', '.select2-search--inline', function (evt) {
if (self.$search.val()) {
evt.stopPropagation();
}
});
    // Try to detect the IE version using the `documentMode` property that
// is stored on the document. This is only implemented in IE and is
// slightly cleaner than doing a user agent check.
// This property is not available in Edge, but Edge also doesn't have
// this bug.
var msie = document.documentMode;
var disableInputEvents = msie && msie <= 11;
// Workaround for browsers which do not support the `input` event
// This will prevent double-triggering of events for browsers which support
// both the `keyup` and `input` events.
this.$selection.on(
'input.searchcheck',
'.select2-search--inline',
function (evt) {
// IE will trigger the `input` event when a placeholder is used on a
// search box. To get around this issue, we are forced to ignore all
// `input` events in IE and keep using `keyup`.
if (disableInputEvents) {
self.$selection.off('input.search input.searchcheck');
return;
}
// Unbind the duplicated `keyup` event
self.$selection.off('keyup.search');
}
);
this.$selection.on(
'keyup.search input.search',
'.select2-search--inline',
function (evt) {
// IE will trigger the `input` event when a placeholder is used on a
// search box. To get around this issue, we are forced to ignore all
// `input` events in IE and keep using `keyup`.
if (disableInputEvents && evt.type === 'input') {
self.$selection.off('input.search input.searchcheck');
return;
}
var key = evt.which;
// We can freely ignore events from modifier keys
if (key == KEYS.SHIFT || key == KEYS.CTRL || key == KEYS.ALT) {
return;
}
// Tabbing will be handled during the `keydown` phase
if (key == KEYS.TAB) {
return;
}
self.handleSearch(evt);
}
);
};
/**
* This method will transfer the tabindex attribute from the rendered
* selection to the search box. This allows for the search box to be used as
* the primary focus instead of the selection container.
*
* @private
*/
Search.prototype._transferTabIndex = function (decorated) {
this.$search.attr('tabindex', this.$selection.attr('tabindex'));
this.$selection.attr('tabindex', '-1');
};
Search.prototype.createPlaceholder = function (decorated, placeholder) {
this.$search.attr('placeholder', placeholder.text);
};
Search.prototype.update = function (decorated, data) {
var searchHadFocus = this.$search[0] == document.activeElement;
this.$search.attr('placeholder', '');
decorated.call(this, data);
this.$selection.find('.select2-selection__rendered')
.append(this.$searchContainer);
this.resizeSearch();
if (searchHadFocus) {
this.$search.trigger('focus');
}
};
Search.prototype.handleSearch = function () {
this.resizeSearch();
if (!this._keyUpPrevented) {
var input = this.$search.val();
this.trigger('query', {
term: input
});
}
this._keyUpPrevented = false;
};
Search.prototype.searchRemoveChoice = function (decorated, item) {
this.trigger('unselect', {
data: item
});
this.$search.val(item.text);
this.handleSearch();
};
Search.prototype.resizeSearch = function () {
this.$search.css('width', '25px');
var width = '';
if (this.$search.attr('placeholder') !== '') {
width = this.$selection.find('.select2-selection__rendered').width();
} else {
var minimumWidth = this.$search.val().length + 1;
width = (minimumWidth * 0.75) + 'em';
}
this.$search.css('width', width);
};
return Search;
});
S2.define('select2/selection/eventRelay',[
'jquery'
], function ($) {
function EventRelay () { }
EventRelay.prototype.bind = function (decorated, container, $container) {
var self = this;
var relayEvents = [
'open', 'opening',
'close', 'closing',
'select', 'selecting',
'unselect', 'unselecting',
'clear', 'clearing'
];
var preventableEvents = [
'opening', 'closing', 'selecting', 'unselecting', 'clearing'
];
decorated.call(this, container, $container);
container.on('*', function (name, params) {
// Ignore events that should not be relayed
if ($.inArray(name, relayEvents) === -1) {
return;
}
// The parameters should always be an object
params = params || {};
// Generate the jQuery event for the Select2 event
var evt = $.Event('select2:' + name, {
params: params
});
self.$element.trigger(evt);
      // Only set `prevented` for events that can actually be prevented
if ($.inArray(name, preventableEvents) === -1) {
return;
}
params.prevented = evt.isDefaultPrevented();
});
};
return EventRelay;
});
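/*
 * Usage sketch: the relay above re-emits internal Select2 events as
 * namespaced jQuery events ('select2:' + name) on the original <select>.
 * Illustrative only, not executed here; it assumes jQuery, the `.select2()`
 * plugin entry point defined later in this bundle, and an existing
 * <select id="tags"> element.
 *
 *   $('#tags').on('select2:select', function (e) {
 *     // e.params.data is the item that was just selected
 *     console.log('selected', e.params.data.id, e.params.data.text);
 *   });
 *
 *   $('#tags').on('select2:selecting', function (e) {
 *     // 'selecting' is listed as preventable above, so this cancels it
 *     e.preventDefault();
 *   });
 */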
S2.define('select2/translation',[
'jquery',
'require'
], function ($, require) {
function Translation (dict) {
this.dict = dict || {};
}
Translation.prototype.all = function () {
return this.dict;
};
Translation.prototype.get = function (key) {
return this.dict[key];
};
Translation.prototype.extend = function (translation) {
this.dict = $.extend({}, translation.all(), this.dict);
};
// Static functions
Translation._cache = {};
Translation.loadPath = function (path) {
if (!(path in Translation._cache)) {
var translations = require(path);
Translation._cache[path] = translations;
}
return new Translation(Translation._cache[path]);
};
return Translation;
});
S2.define('select2/diacritics',[
], function () {
var diacritics = {
'\u24B6': 'A',
'\uFF21': 'A',
'\u00C0': 'A',
'\u00C1': 'A',
'\u00C2': 'A',
'\u1EA6': 'A',
'\u1EA4': 'A',
'\u1EAA': 'A',
'\u1EA8': 'A',
'\u00C3': 'A',
'\u0100': 'A',
'\u0102': 'A',
'\u1EB0': 'A',
'\u1EAE': 'A',
'\u1EB4': 'A',
'\u1EB2': 'A',
'\u0226': 'A',
'\u01E0': 'A',
'\u00C4': 'A',
'\u01DE': 'A',
'\u1EA2': 'A',
'\u00C5': 'A',
'\u01FA': 'A',
'\u01CD': 'A',
'\u0200': 'A',
'\u0202': 'A',
'\u1EA0': 'A',
'\u1EAC': 'A',
'\u1EB6': 'A',
'\u1E00': 'A',
'\u0104': 'A',
'\u023A': 'A',
'\u2C6F': 'A',
'\uA732': 'AA',
'\u00C6': 'AE',
'\u01FC': 'AE',
'\u01E2': 'AE',
'\uA734': 'AO',
'\uA736': 'AU',
'\uA738': 'AV',
'\uA73A': 'AV',
'\uA73C': 'AY',
'\u24B7': 'B',
'\uFF22': 'B',
'\u1E02': 'B',
'\u1E04': 'B',
'\u1E06': 'B',
'\u0243': 'B',
'\u0182': 'B',
'\u0181': 'B',
'\u24B8': 'C',
'\uFF23': 'C',
'\u0106': 'C',
'\u0108': 'C',
'\u010A': 'C',
'\u010C': 'C',
'\u00C7': 'C',
'\u1E08': 'C',
'\u0187': 'C',
'\u023B': 'C',
'\uA73E': 'C',
'\u24B9': 'D',
'\uFF24': 'D',
'\u1E0A': 'D',
'\u010E': 'D',
'\u1E0C': 'D',
'\u1E10': 'D',
'\u1E12': 'D',
'\u1E0E': 'D',
'\u0110': 'D',
'\u018B': 'D',
'\u018A': 'D',
'\u0189': 'D',
'\uA779': 'D',
'\u01F1': 'DZ',
'\u01C4': 'DZ',
'\u01F2': 'Dz',
'\u01C5': 'Dz',
'\u24BA': 'E',
'\uFF25': 'E',
'\u00C8': 'E',
'\u00C9': 'E',
'\u00CA': 'E',
'\u1EC0': 'E',
'\u1EBE': 'E',
'\u1EC4': 'E',
'\u1EC2': 'E',
'\u1EBC': 'E',
'\u0112': 'E',
'\u1E14': 'E',
'\u1E16': 'E',
'\u0114': 'E',
'\u0116': 'E',
'\u00CB': 'E',
'\u1EBA': 'E',
'\u011A': 'E',
'\u0204': 'E',
'\u0206': 'E',
'\u1EB8': 'E',
'\u1EC6': 'E',
'\u0228': 'E',
'\u1E1C': 'E',
'\u0118': 'E',
'\u1E18': 'E',
'\u1E1A': 'E',
'\u0190': 'E',
'\u018E': 'E',
'\u24BB': 'F',
'\uFF26': 'F',
'\u1E1E': 'F',
'\u0191': 'F',
'\uA77B': 'F',
'\u24BC': 'G',
'\uFF27': 'G',
'\u01F4': 'G',
'\u011C': 'G',
'\u1E20': 'G',
'\u011E': 'G',
'\u0120': 'G',
'\u01E6': 'G',
'\u0122': 'G',
'\u01E4': 'G',
'\u0193': 'G',
'\uA7A0': 'G',
'\uA77D': 'G',
'\uA77E': 'G',
'\u24BD': 'H',
'\uFF28': 'H',
'\u0124': 'H',
'\u1E22': 'H',
'\u1E26': 'H',
'\u021E': 'H',
'\u1E24': 'H',
'\u1E28': 'H',
'\u1E2A': 'H',
'\u0126': 'H',
'\u2C67': 'H',
'\u2C75': 'H',
'\uA78D': 'H',
'\u24BE': 'I',
'\uFF29': 'I',
'\u00CC': 'I',
'\u00CD': 'I',
'\u00CE': 'I',
'\u0128': 'I',
'\u012A': 'I',
'\u012C': 'I',
'\u0130': 'I',
'\u00CF': 'I',
'\u1E2E': 'I',
'\u1EC8': 'I',
'\u01CF': 'I',
'\u0208': 'I',
'\u020A': 'I',
'\u1ECA': 'I',
'\u012E': 'I',
'\u1E2C': 'I',
'\u0197': 'I',
'\u24BF': 'J',
'\uFF2A': 'J',
'\u0134': 'J',
'\u0248': 'J',
'\u24C0': 'K',
'\uFF2B': 'K',
'\u1E30': 'K',
'\u01E8': 'K',
'\u1E32': 'K',
'\u0136': 'K',
'\u1E34': 'K',
'\u0198': 'K',
'\u2C69': 'K',
'\uA740': 'K',
'\uA742': 'K',
'\uA744': 'K',
'\uA7A2': 'K',
'\u24C1': 'L',
'\uFF2C': 'L',
'\u013F': 'L',
'\u0139': 'L',
'\u013D': 'L',
'\u1E36': 'L',
'\u1E38': 'L',
'\u013B': 'L',
'\u1E3C': 'L',
'\u1E3A': 'L',
'\u0141': 'L',
'\u023D': 'L',
'\u2C62': 'L',
'\u2C60': 'L',
'\uA748': 'L',
'\uA746': 'L',
'\uA780': 'L',
'\u01C7': 'LJ',
'\u01C8': 'Lj',
'\u24C2': 'M',
'\uFF2D': 'M',
'\u1E3E': 'M',
'\u1E40': 'M',
'\u1E42': 'M',
'\u2C6E': 'M',
'\u019C': 'M',
'\u24C3': 'N',
'\uFF2E': 'N',
'\u01F8': 'N',
'\u0143': 'N',
'\u00D1': 'N',
'\u1E44': 'N',
'\u0147': 'N',
'\u1E46': 'N',
'\u0145': 'N',
'\u1E4A': 'N',
'\u1E48': 'N',
'\u0220': 'N',
'\u019D': 'N',
'\uA790': 'N',
'\uA7A4': 'N',
'\u01CA': 'NJ',
'\u01CB': 'Nj',
'\u24C4': 'O',
'\uFF2F': 'O',
'\u00D2': 'O',
'\u00D3': 'O',
'\u00D4': 'O',
'\u1ED2': 'O',
'\u1ED0': 'O',
'\u1ED6': 'O',
'\u1ED4': 'O',
'\u00D5': 'O',
'\u1E4C': 'O',
'\u022C': 'O',
'\u1E4E': 'O',
'\u014C': 'O',
'\u1E50': 'O',
'\u1E52': 'O',
'\u014E': 'O',
'\u022E': 'O',
'\u0230': 'O',
'\u00D6': 'O',
'\u022A': 'O',
'\u1ECE': 'O',
'\u0150': 'O',
'\u01D1': 'O',
'\u020C': 'O',
'\u020E': 'O',
'\u01A0': 'O',
'\u1EDC': 'O',
'\u1EDA': 'O',
'\u1EE0': 'O',
'\u1EDE': 'O',
'\u1EE2': 'O',
'\u1ECC': 'O',
'\u1ED8': 'O',
'\u01EA': 'O',
'\u01EC': 'O',
'\u00D8': 'O',
'\u01FE': 'O',
'\u0186': 'O',
'\u019F': 'O',
'\uA74A': 'O',
'\uA74C': 'O',
'\u0152': 'OE',
'\u01A2': 'OI',
'\uA74E': 'OO',
'\u0222': 'OU',
'\u24C5': 'P',
'\uFF30': 'P',
'\u1E54': 'P',
'\u1E56': 'P',
'\u01A4': 'P',
'\u2C63': 'P',
'\uA750': 'P',
'\uA752': 'P',
'\uA754': 'P',
'\u24C6': 'Q',
'\uFF31': 'Q',
'\uA756': 'Q',
'\uA758': 'Q',
'\u024A': 'Q',
'\u24C7': 'R',
'\uFF32': 'R',
'\u0154': 'R',
'\u1E58': 'R',
'\u0158': 'R',
'\u0210': 'R',
'\u0212': 'R',
'\u1E5A': 'R',
'\u1E5C': 'R',
'\u0156': 'R',
'\u1E5E': 'R',
'\u024C': 'R',
'\u2C64': 'R',
'\uA75A': 'R',
'\uA7A6': 'R',
'\uA782': 'R',
'\u24C8': 'S',
'\uFF33': 'S',
'\u1E9E': 'S',
'\u015A': 'S',
'\u1E64': 'S',
'\u015C': 'S',
'\u1E60': 'S',
'\u0160': 'S',
'\u1E66': 'S',
'\u1E62': 'S',
'\u1E68': 'S',
'\u0218': 'S',
'\u015E': 'S',
'\u2C7E': 'S',
'\uA7A8': 'S',
'\uA784': 'S',
'\u24C9': 'T',
'\uFF34': 'T',
'\u1E6A': 'T',
'\u0164': 'T',
'\u1E6C': 'T',
'\u021A': 'T',
'\u0162': 'T',
'\u1E70': 'T',
'\u1E6E': 'T',
'\u0166': 'T',
'\u01AC': 'T',
'\u01AE': 'T',
'\u023E': 'T',
'\uA786': 'T',
'\uA728': 'TZ',
'\u24CA': 'U',
'\uFF35': 'U',
'\u00D9': 'U',
'\u00DA': 'U',
'\u00DB': 'U',
'\u0168': 'U',
'\u1E78': 'U',
'\u016A': 'U',
'\u1E7A': 'U',
'\u016C': 'U',
'\u00DC': 'U',
'\u01DB': 'U',
'\u01D7': 'U',
'\u01D5': 'U',
'\u01D9': 'U',
'\u1EE6': 'U',
'\u016E': 'U',
'\u0170': 'U',
'\u01D3': 'U',
'\u0214': 'U',
'\u0216': 'U',
'\u01AF': 'U',
'\u1EEA': 'U',
'\u1EE8': 'U',
'\u1EEE': 'U',
'\u1EEC': 'U',
'\u1EF0': 'U',
'\u1EE4': 'U',
'\u1E72': 'U',
'\u0172': 'U',
'\u1E76': 'U',
'\u1E74': 'U',
'\u0244': 'U',
'\u24CB': 'V',
'\uFF36': 'V',
'\u1E7C': 'V',
'\u1E7E': 'V',
'\u01B2': 'V',
'\uA75E': 'V',
'\u0245': 'V',
'\uA760': 'VY',
'\u24CC': 'W',
'\uFF37': 'W',
'\u1E80': 'W',
'\u1E82': 'W',
'\u0174': 'W',
'\u1E86': 'W',
'\u1E84': 'W',
'\u1E88': 'W',
'\u2C72': 'W',
'\u24CD': 'X',
'\uFF38': 'X',
'\u1E8A': 'X',
'\u1E8C': 'X',
'\u24CE': 'Y',
'\uFF39': 'Y',
'\u1EF2': 'Y',
'\u00DD': 'Y',
'\u0176': 'Y',
'\u1EF8': 'Y',
'\u0232': 'Y',
'\u1E8E': 'Y',
'\u0178': 'Y',
'\u1EF6': 'Y',
'\u1EF4': 'Y',
'\u01B3': 'Y',
'\u024E': 'Y',
'\u1EFE': 'Y',
'\u24CF': 'Z',
'\uFF3A': 'Z',
'\u0179': 'Z',
'\u1E90': 'Z',
'\u017B': 'Z',
'\u017D': 'Z',
'\u1E92': 'Z',
'\u1E94': 'Z',
'\u01B5': 'Z',
'\u0224': 'Z',
'\u2C7F': 'Z',
'\u2C6B': 'Z',
'\uA762': 'Z',
'\u24D0': 'a',
'\uFF41': 'a',
'\u1E9A': 'a',
'\u00E0': 'a',
'\u00E1': 'a',
'\u00E2': 'a',
'\u1EA7': 'a',
'\u1EA5': 'a',
'\u1EAB': 'a',
'\u1EA9': 'a',
'\u00E3': 'a',
'\u0101': 'a',
'\u0103': 'a',
'\u1EB1': 'a',
'\u1EAF': 'a',
'\u1EB5': 'a',
'\u1EB3': 'a',
'\u0227': 'a',
'\u01E1': 'a',
'\u00E4': 'a',
'\u01DF': 'a',
'\u1EA3': 'a',
'\u00E5': 'a',
'\u01FB': 'a',
'\u01CE': 'a',
'\u0201': 'a',
'\u0203': 'a',
'\u1EA1': 'a',
'\u1EAD': 'a',
'\u1EB7': 'a',
'\u1E01': 'a',
'\u0105': 'a',
'\u2C65': 'a',
'\u0250': 'a',
'\uA733': 'aa',
'\u00E6': 'ae',
'\u01FD': 'ae',
'\u01E3': 'ae',
'\uA735': 'ao',
'\uA737': 'au',
'\uA739': 'av',
'\uA73B': 'av',
'\uA73D': 'ay',
'\u24D1': 'b',
'\uFF42': 'b',
'\u1E03': 'b',
'\u1E05': 'b',
'\u1E07': 'b',
'\u0180': 'b',
'\u0183': 'b',
'\u0253': 'b',
'\u24D2': 'c',
'\uFF43': 'c',
'\u0107': 'c',
'\u0109': 'c',
'\u010B': 'c',
'\u010D': 'c',
'\u00E7': 'c',
'\u1E09': 'c',
'\u0188': 'c',
'\u023C': 'c',
'\uA73F': 'c',
'\u2184': 'c',
'\u24D3': 'd',
'\uFF44': 'd',
'\u1E0B': 'd',
'\u010F': 'd',
'\u1E0D': 'd',
'\u1E11': 'd',
'\u1E13': 'd',
'\u1E0F': 'd',
'\u0111': 'd',
'\u018C': 'd',
'\u0256': 'd',
'\u0257': 'd',
'\uA77A': 'd',
'\u01F3': 'dz',
'\u01C6': 'dz',
'\u24D4': 'e',
'\uFF45': 'e',
'\u00E8': 'e',
'\u00E9': 'e',
'\u00EA': 'e',
'\u1EC1': 'e',
'\u1EBF': 'e',
'\u1EC5': 'e',
'\u1EC3': 'e',
'\u1EBD': 'e',
'\u0113': 'e',
'\u1E15': 'e',
'\u1E17': 'e',
'\u0115': 'e',
'\u0117': 'e',
'\u00EB': 'e',
'\u1EBB': 'e',
'\u011B': 'e',
'\u0205': 'e',
'\u0207': 'e',
'\u1EB9': 'e',
'\u1EC7': 'e',
'\u0229': 'e',
'\u1E1D': 'e',
'\u0119': 'e',
'\u1E19': 'e',
'\u1E1B': 'e',
'\u0247': 'e',
'\u025B': 'e',
'\u01DD': 'e',
'\u24D5': 'f',
'\uFF46': 'f',
'\u1E1F': 'f',
'\u0192': 'f',
'\uA77C': 'f',
'\u24D6': 'g',
'\uFF47': 'g',
'\u01F5': 'g',
'\u011D': 'g',
'\u1E21': 'g',
'\u011F': 'g',
'\u0121': 'g',
'\u01E7': 'g',
'\u0123': 'g',
'\u01E5': 'g',
'\u0260': 'g',
'\uA7A1': 'g',
'\u1D79': 'g',
'\uA77F': 'g',
'\u24D7': 'h',
'\uFF48': 'h',
'\u0125': 'h',
'\u1E23': 'h',
'\u1E27': 'h',
'\u021F': 'h',
'\u1E25': 'h',
'\u1E29': 'h',
'\u1E2B': 'h',
'\u1E96': 'h',
'\u0127': 'h',
'\u2C68': 'h',
'\u2C76': 'h',
'\u0265': 'h',
'\u0195': 'hv',
'\u24D8': 'i',
'\uFF49': 'i',
'\u00EC': 'i',
'\u00ED': 'i',
'\u00EE': 'i',
'\u0129': 'i',
'\u012B': 'i',
'\u012D': 'i',
'\u00EF': 'i',
'\u1E2F': 'i',
'\u1EC9': 'i',
'\u01D0': 'i',
'\u0209': 'i',
'\u020B': 'i',
'\u1ECB': 'i',
'\u012F': 'i',
'\u1E2D': 'i',
'\u0268': 'i',
'\u0131': 'i',
'\u24D9': 'j',
'\uFF4A': 'j',
'\u0135': 'j',
'\u01F0': 'j',
'\u0249': 'j',
'\u24DA': 'k',
'\uFF4B': 'k',
'\u1E31': 'k',
'\u01E9': 'k',
'\u1E33': 'k',
'\u0137': 'k',
'\u1E35': 'k',
'\u0199': 'k',
'\u2C6A': 'k',
'\uA741': 'k',
'\uA743': 'k',
'\uA745': 'k',
'\uA7A3': 'k',
'\u24DB': 'l',
'\uFF4C': 'l',
'\u0140': 'l',
'\u013A': 'l',
'\u013E': 'l',
'\u1E37': 'l',
'\u1E39': 'l',
'\u013C': 'l',
'\u1E3D': 'l',
'\u1E3B': 'l',
'\u017F': 'l',
'\u0142': 'l',
'\u019A': 'l',
'\u026B': 'l',
'\u2C61': 'l',
'\uA749': 'l',
'\uA781': 'l',
'\uA747': 'l',
'\u01C9': 'lj',
'\u24DC': 'm',
'\uFF4D': 'm',
'\u1E3F': 'm',
'\u1E41': 'm',
'\u1E43': 'm',
'\u0271': 'm',
'\u026F': 'm',
'\u24DD': 'n',
'\uFF4E': 'n',
'\u01F9': 'n',
'\u0144': 'n',
'\u00F1': 'n',
'\u1E45': 'n',
'\u0148': 'n',
'\u1E47': 'n',
'\u0146': 'n',
'\u1E4B': 'n',
'\u1E49': 'n',
'\u019E': 'n',
'\u0272': 'n',
'\u0149': 'n',
'\uA791': 'n',
'\uA7A5': 'n',
'\u01CC': 'nj',
'\u24DE': 'o',
'\uFF4F': 'o',
'\u00F2': 'o',
'\u00F3': 'o',
'\u00F4': 'o',
'\u1ED3': 'o',
'\u1ED1': 'o',
'\u1ED7': 'o',
'\u1ED5': 'o',
'\u00F5': 'o',
'\u1E4D': 'o',
'\u022D': 'o',
'\u1E4F': 'o',
'\u014D': 'o',
'\u1E51': 'o',
'\u1E53': 'o',
'\u014F': 'o',
'\u022F': 'o',
'\u0231': 'o',
'\u00F6': 'o',
'\u022B': 'o',
'\u1ECF': 'o',
'\u0151': 'o',
'\u01D2': 'o',
'\u020D': 'o',
'\u020F': 'o',
'\u01A1': 'o',
'\u1EDD': 'o',
'\u1EDB': 'o',
'\u1EE1': 'o',
'\u1EDF': 'o',
'\u1EE3': 'o',
'\u1ECD': 'o',
'\u1ED9': 'o',
'\u01EB': 'o',
'\u01ED': 'o',
'\u00F8': 'o',
'\u01FF': 'o',
'\u0254': 'o',
'\uA74B': 'o',
'\uA74D': 'o',
'\u0275': 'o',
'\u0153': 'oe',
'\u01A3': 'oi',
'\u0223': 'ou',
'\uA74F': 'oo',
'\u24DF': 'p',
'\uFF50': 'p',
'\u1E55': 'p',
'\u1E57': 'p',
'\u01A5': 'p',
'\u1D7D': 'p',
'\uA751': 'p',
'\uA753': 'p',
'\uA755': 'p',
'\u24E0': 'q',
'\uFF51': 'q',
'\u024B': 'q',
'\uA757': 'q',
'\uA759': 'q',
'\u24E1': 'r',
'\uFF52': 'r',
'\u0155': 'r',
'\u1E59': 'r',
'\u0159': 'r',
'\u0211': 'r',
'\u0213': 'r',
'\u1E5B': 'r',
'\u1E5D': 'r',
'\u0157': 'r',
'\u1E5F': 'r',
'\u024D': 'r',
'\u027D': 'r',
'\uA75B': 'r',
'\uA7A7': 'r',
'\uA783': 'r',
'\u24E2': 's',
'\uFF53': 's',
'\u00DF': 's',
'\u015B': 's',
'\u1E65': 's',
'\u015D': 's',
'\u1E61': 's',
'\u0161': 's',
'\u1E67': 's',
'\u1E63': 's',
'\u1E69': 's',
'\u0219': 's',
'\u015F': 's',
'\u023F': 's',
'\uA7A9': 's',
'\uA785': 's',
'\u1E9B': 's',
'\u24E3': 't',
'\uFF54': 't',
'\u1E6B': 't',
'\u1E97': 't',
'\u0165': 't',
'\u1E6D': 't',
'\u021B': 't',
'\u0163': 't',
'\u1E71': 't',
'\u1E6F': 't',
'\u0167': 't',
'\u01AD': 't',
'\u0288': 't',
'\u2C66': 't',
'\uA787': 't',
'\uA729': 'tz',
'\u24E4': 'u',
'\uFF55': 'u',
'\u00F9': 'u',
'\u00FA': 'u',
'\u00FB': 'u',
'\u0169': 'u',
'\u1E79': 'u',
'\u016B': 'u',
'\u1E7B': 'u',
'\u016D': 'u',
'\u00FC': 'u',
'\u01DC': 'u',
'\u01D8': 'u',
'\u01D6': 'u',
'\u01DA': 'u',
'\u1EE7': 'u',
'\u016F': 'u',
'\u0171': 'u',
'\u01D4': 'u',
'\u0215': 'u',
'\u0217': 'u',
'\u01B0': 'u',
'\u1EEB': 'u',
'\u1EE9': 'u',
'\u1EEF': 'u',
'\u1EED': 'u',
'\u1EF1': 'u',
'\u1EE5': 'u',
'\u1E73': 'u',
'\u0173': 'u',
'\u1E77': 'u',
'\u1E75': 'u',
'\u0289': 'u',
'\u24E5': 'v',
'\uFF56': 'v',
'\u1E7D': 'v',
'\u1E7F': 'v',
'\u028B': 'v',
'\uA75F': 'v',
'\u028C': 'v',
'\uA761': 'vy',
'\u24E6': 'w',
'\uFF57': 'w',
'\u1E81': 'w',
'\u1E83': 'w',
'\u0175': 'w',
'\u1E87': 'w',
'\u1E85': 'w',
'\u1E98': 'w',
'\u1E89': 'w',
'\u2C73': 'w',
'\u24E7': 'x',
'\uFF58': 'x',
'\u1E8B': 'x',
'\u1E8D': 'x',
'\u24E8': 'y',
'\uFF59': 'y',
'\u1EF3': 'y',
'\u00FD': 'y',
'\u0177': 'y',
'\u1EF9': 'y',
'\u0233': 'y',
'\u1E8F': 'y',
'\u00FF': 'y',
'\u1EF7': 'y',
'\u1E99': 'y',
'\u1EF5': 'y',
'\u01B4': 'y',
'\u024F': 'y',
'\u1EFF': 'y',
'\u24E9': 'z',
'\uFF5A': 'z',
'\u017A': 'z',
'\u1E91': 'z',
'\u017C': 'z',
'\u017E': 'z',
'\u1E93': 'z',
'\u1E95': 'z',
'\u01B6': 'z',
'\u0225': 'z',
'\u0240': 'z',
'\u2C6C': 'z',
'\uA763': 'z',
'\u0386': '\u0391',
'\u0388': '\u0395',
'\u0389': '\u0397',
'\u038A': '\u0399',
'\u03AA': '\u0399',
'\u038C': '\u039F',
'\u038E': '\u03A5',
'\u03AB': '\u03A5',
'\u038F': '\u03A9',
'\u03AC': '\u03B1',
'\u03AD': '\u03B5',
'\u03AE': '\u03B7',
'\u03AF': '\u03B9',
'\u03CA': '\u03B9',
'\u0390': '\u03B9',
'\u03CC': '\u03BF',
'\u03CD': '\u03C5',
'\u03CB': '\u03C5',
'\u03B0': '\u03C5',
'\u03CE': '\u03C9',
'\u03C2': '\u03C3',
'\u2019': '\''
};
return diacritics;
});
S2.define('select2/data/base',[
'../utils'
], function (Utils) {
function BaseAdapter ($element, options) {
BaseAdapter.__super__.constructor.call(this);
}
Utils.Extend(BaseAdapter, Utils.Observable);
BaseAdapter.prototype.current = function (callback) {
throw new Error('The `current` method must be defined in child classes.');
};
BaseAdapter.prototype.query = function (params, callback) {
throw new Error('The `query` method must be defined in child classes.');
};
BaseAdapter.prototype.bind = function (container, $container) {
// Can be implemented in subclasses
};
BaseAdapter.prototype.destroy = function () {
// Can be implemented in subclasses
};
BaseAdapter.prototype.generateResultId = function (container, data) {
var id = container.id + '-result-';
id += Utils.generateChars(4);
if (data.id != null) {
id += '-' + data.id.toString();
} else {
id += '-' + Utils.generateChars(4);
}
return id;
};
return BaseAdapter;
});
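/*
 * Usage sketch: BaseAdapter is the contract for custom data adapters;
 * `current` and `query` must be overridden, the rest is optional. The
 * snippet below is illustrative only and not executed here; it assumes the
 * same `Utils` helpers used in this module and would be wired in through
 * the `dataAdapter` option handled in select2/defaults further below.
 *
 *   function StaticAdapter ($element, options) {
 *     StaticAdapter.__super__.constructor.call(this);
 *   }
 *   Utils.Extend(StaticAdapter, BaseAdapter);
 *
 *   StaticAdapter.prototype.current = function (callback) {
 *     callback([]); // nothing selected in this toy adapter
 *   };
 *
 *   StaticAdapter.prototype.query = function (params, callback) {
 *     callback({ results: [{ id: '1', text: 'Only option' }] });
 *   };
 */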
S2.define('select2/data/select',[
'./base',
'../utils',
'jquery'
], function (BaseAdapter, Utils, $) {
function SelectAdapter ($element, options) {
this.$element = $element;
this.options = options;
SelectAdapter.__super__.constructor.call(this);
}
Utils.Extend(SelectAdapter, BaseAdapter);
SelectAdapter.prototype.current = function (callback) {
var data = [];
var self = this;
this.$element.find(':selected').each(function () {
var $option = $(this);
var option = self.item($option);
data.push(option);
});
callback(data);
};
SelectAdapter.prototype.select = function (data) {
var self = this;
data.selected = true;
// If data.element is a DOM node, use it instead
if ($(data.element).is('option')) {
data.element.selected = true;
this.$element.trigger('input').trigger('change');
return;
}
if (this.$element.prop('multiple')) {
this.current(function (currentData) {
var val = [];
data = [data];
data.push.apply(data, currentData);
for (var d = 0; d < data.length; d++) {
var id = data[d].id;
if ($.inArray(id, val) === -1) {
val.push(id);
}
}
self.$element.val(val);
self.$element.trigger('input').trigger('change');
});
} else {
var val = data.id;
this.$element.val(val);
this.$element.trigger('input').trigger('change');
}
};
SelectAdapter.prototype.unselect = function (data) {
var self = this;
if (!this.$element.prop('multiple')) {
return;
}
data.selected = false;
if ($(data.element).is('option')) {
data.element.selected = false;
this.$element.trigger('input').trigger('change');
return;
}
this.current(function (currentData) {
var val = [];
for (var d = 0; d < currentData.length; d++) {
var id = currentData[d].id;
if (id !== data.id && $.inArray(id, val) === -1) {
val.push(id);
}
}
self.$element.val(val);
self.$element.trigger('input').trigger('change');
});
};
SelectAdapter.prototype.bind = function (container, $container) {
var self = this;
this.container = container;
container.on('select', function (params) {
self.select(params.data);
});
container.on('unselect', function (params) {
self.unselect(params.data);
});
};
SelectAdapter.prototype.destroy = function () {
// Remove anything added to child elements
this.$element.find('*').each(function () {
// Remove any custom data set by Select2
Utils.RemoveData(this);
});
};
SelectAdapter.prototype.query = function (params, callback) {
var data = [];
var self = this;
var $options = this.$element.children();
$options.each(function () {
var $option = $(this);
if (!$option.is('option') && !$option.is('optgroup')) {
return;
}
var option = self.item($option);
var matches = self.matches(params, option);
if (matches !== null) {
data.push(matches);
}
});
callback({
results: data
});
};
SelectAdapter.prototype.addOptions = function ($options) {
Utils.appendMany(this.$element, $options);
};
SelectAdapter.prototype.option = function (data) {
var option;
if (data.children) {
option = document.createElement('optgroup');
option.label = data.text;
} else {
option = document.createElement('option');
if (option.textContent !== undefined) {
option.textContent = data.text;
} else {
option.innerText = data.text;
}
}
if (data.id !== undefined) {
option.value = data.id;
}
if (data.disabled) {
option.disabled = true;
}
if (data.selected) {
option.selected = true;
}
if (data.title) {
option.title = data.title;
}
var $option = $(option);
var normalizedData = this._normalizeItem(data);
normalizedData.element = option;
// Override the option's data with the combined data
Utils.StoreData(option, 'data', normalizedData);
return $option;
};
SelectAdapter.prototype.item = function ($option) {
var data = {};
data = Utils.GetData($option[0], 'data');
if (data != null) {
return data;
}
if ($option.is('option')) {
data = {
id: $option.val(),
text: $option.text(),
disabled: $option.prop('disabled'),
selected: $option.prop('selected'),
title: $option.prop('title')
};
} else if ($option.is('optgroup')) {
data = {
text: $option.prop('label'),
children: [],
title: $option.prop('title')
};
var $children = $option.children('option');
var children = [];
for (var c = 0; c < $children.length; c++) {
var $child = $($children[c]);
var child = this.item($child);
children.push(child);
}
data.children = children;
}
data = this._normalizeItem(data);
data.element = $option[0];
Utils.StoreData($option[0], 'data', data);
return data;
};
SelectAdapter.prototype._normalizeItem = function (item) {
if (item !== Object(item)) {
item = {
id: item,
text: item
};
}
item = $.extend({}, {
text: ''
}, item);
var defaults = {
selected: false,
disabled: false
};
if (item.id != null) {
item.id = item.id.toString();
}
if (item.text != null) {
item.text = item.text.toString();
}
if (item._resultId == null && item.id && this.container != null) {
item._resultId = this.generateResultId(this.container, item);
}
return $.extend({}, defaults, item);
};
SelectAdapter.prototype.matches = function (params, data) {
var matcher = this.options.get('matcher');
return matcher(params, data);
};
return SelectAdapter;
});
S2.define('select2/data/array',[
'./select',
'../utils',
'jquery'
], function (SelectAdapter, Utils, $) {
function ArrayAdapter ($element, options) {
this._dataToConvert = options.get('data') || [];
ArrayAdapter.__super__.constructor.call(this, $element, options);
}
Utils.Extend(ArrayAdapter, SelectAdapter);
ArrayAdapter.prototype.bind = function (container, $container) {
ArrayAdapter.__super__.bind.call(this, container, $container);
this.addOptions(this.convertToOptions(this._dataToConvert));
};
ArrayAdapter.prototype.select = function (data) {
var $option = this.$element.find('option').filter(function (i, elm) {
return elm.value == data.id.toString();
});
if ($option.length === 0) {
$option = this.option(data);
this.addOptions($option);
}
ArrayAdapter.__super__.select.call(this, data);
};
ArrayAdapter.prototype.convertToOptions = function (data) {
var self = this;
var $existing = this.$element.find('option');
var existingIds = $existing.map(function () {
return self.item($(this)).id;
}).get();
var $options = [];
// Filter out all items except for the one passed in the argument
function onlyItem (item) {
return function () {
return $(this).val() == item.id;
};
}
for (var d = 0; d < data.length; d++) {
var item = this._normalizeItem(data[d]);
// Skip items which were pre-loaded, only merge the data
if ($.inArray(item.id, existingIds) >= 0) {
var $existingOption = $existing.filter(onlyItem(item));
var existingData = this.item($existingOption);
var newData = $.extend(true, {}, item, existingData);
var $newOption = this.option(newData);
$existingOption.replaceWith($newOption);
continue;
}
var $option = this.option(item);
if (item.children) {
var $children = this.convertToOptions(item.children);
Utils.appendMany($option, $children);
}
$options.push($option);
}
return $options;
};
return ArrayAdapter;
});
S2.define('select2/data/ajax',[
'./array',
'../utils',
'jquery'
], function (ArrayAdapter, Utils, $) {
function AjaxAdapter ($element, options) {
this.ajaxOptions = this._applyDefaults(options.get('ajax'));
if (this.ajaxOptions.processResults != null) {
this.processResults = this.ajaxOptions.processResults;
}
AjaxAdapter.__super__.constructor.call(this, $element, options);
}
Utils.Extend(AjaxAdapter, ArrayAdapter);
AjaxAdapter.prototype._applyDefaults = function (options) {
var defaults = {
data: function (params) {
return $.extend({}, params, {
q: params.term
});
},
transport: function (params, success, failure) {
var $request = $.ajax(params);
$request.then(success);
$request.fail(failure);
return $request;
}
};
return $.extend({}, defaults, options, true);
};
AjaxAdapter.prototype.processResults = function (results) {
return results;
};
AjaxAdapter.prototype.query = function (params, callback) {
var matches = [];
var self = this;
if (this._request != null) {
// JSONP requests cannot always be aborted
if ($.isFunction(this._request.abort)) {
this._request.abort();
}
this._request = null;
}
var options = $.extend({
type: 'GET'
}, this.ajaxOptions);
if (typeof options.url === 'function') {
options.url = options.url.call(this.$element, params);
}
if (typeof options.data === 'function') {
options.data = options.data.call(this.$element, params);
}
function request () {
var $request = options.transport(options, function (data) {
var results = self.processResults(data, params);
if (self.options.get('debug') && window.console && console.error) {
// Check to make sure that the response included a `results` key.
if (!results || !results.results || !$.isArray(results.results)) {
console.error(
'Select2: The AJAX results did not return an array in the ' +
'`results` key of the response.'
);
}
}
callback(results);
}, function () {
// Attempt to detect if a request was aborted
// Only works if the transport exposes a status property
if ('status' in $request &&
($request.status === 0 || $request.status === '0')) {
return;
}
self.trigger('results:message', {
message: 'errorLoading'
});
});
self._request = $request;
}
if (this.ajaxOptions.delay && params.term != null) {
if (this._queryTimeout) {
window.clearTimeout(this._queryTimeout);
}
this._queryTimeout = window.setTimeout(request, this.ajaxOptions.delay);
} else {
request();
}
};
return AjaxAdapter;
});
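/*
 * Usage sketch: the AjaxAdapter above is normally driven through the `ajax`
 * option. Illustrative only, not executed here; the URL and response shape
 * are hypothetical, and `.select2()` is the jQuery plugin entry point
 * defined later in this bundle.
 *
 *   $('#book').select2({
 *     ajax: {
 *       url: '/catalog/books/search/',   // hypothetical endpoint
 *       delay: 250,                      // debounced in query() above
 *       data: function (params) {
 *         return { q: params.term };     // mirrors the default `data` above
 *       },
 *       processResults: function (data) {
 *         // must resolve to { results: [{ id: ..., text: ... }, ...] }
 *         return { results: data.items };
 *       }
 *     }
 *   });
 */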
S2.define('select2/data/tags',[
'jquery'
], function ($) {
function Tags (decorated, $element, options) {
var tags = options.get('tags');
var createTag = options.get('createTag');
if (createTag !== undefined) {
this.createTag = createTag;
}
var insertTag = options.get('insertTag');
if (insertTag !== undefined) {
this.insertTag = insertTag;
}
decorated.call(this, $element, options);
if ($.isArray(tags)) {
for (var t = 0; t < tags.length; t++) {
var tag = tags[t];
var item = this._normalizeItem(tag);
var $option = this.option(item);
this.$element.append($option);
}
}
}
Tags.prototype.query = function (decorated, params, callback) {
var self = this;
this._removeOldTags();
if (params.term == null || params.page != null) {
decorated.call(this, params, callback);
return;
}
function wrapper (obj, child) {
var data = obj.results;
for (var i = 0; i < data.length; i++) {
var option = data[i];
var checkChildren = (
option.children != null &&
!wrapper({
results: option.children
}, true)
);
var optionText = (option.text || '').toUpperCase();
var paramsTerm = (params.term || '').toUpperCase();
var checkText = optionText === paramsTerm;
if (checkText || checkChildren) {
if (child) {
return false;
}
obj.data = data;
callback(obj);
return;
}
}
if (child) {
return true;
}
var tag = self.createTag(params);
if (tag != null) {
var $option = self.option(tag);
$option.attr('data-select2-tag', true);
self.addOptions([$option]);
self.insertTag(data, tag);
}
obj.results = data;
callback(obj);
}
decorated.call(this, params, wrapper);
};
Tags.prototype.createTag = function (decorated, params) {
var term = $.trim(params.term);
if (term === '') {
return null;
}
return {
id: term,
text: term
};
};
Tags.prototype.insertTag = function (_, data, tag) {
data.unshift(tag);
};
Tags.prototype._removeOldTags = function (_) {
var $options = this.$element.find('option[data-select2-tag]');
$options.each(function () {
if (this.selected) {
return;
}
$(this).remove();
});
};
return Tags;
});
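/*
 * Usage sketch: the tagging decorator above turns unmatched search terms
 * into new options. Illustrative only, not executed here; `.select2()` is
 * the plugin entry point defined later in this bundle.
 *
 *   $('#keywords').select2({
 *     tags: true,
 *     createTag: function (params) {
 *       var term = $.trim(params.term);
 *       if (term === '') { return null; }   // same guard as the default
 *       return { id: term, text: term };    // extra properties just ride along
 *     }
 *   });
 */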
S2.define('select2/data/tokenizer',[
'jquery'
], function ($) {
function Tokenizer (decorated, $element, options) {
var tokenizer = options.get('tokenizer');
if (tokenizer !== undefined) {
this.tokenizer = tokenizer;
}
decorated.call(this, $element, options);
}
Tokenizer.prototype.bind = function (decorated, container, $container) {
decorated.call(this, container, $container);
this.$search = container.dropdown.$search || container.selection.$search ||
$container.find('.select2-search__field');
};
Tokenizer.prototype.query = function (decorated, params, callback) {
var self = this;
function createAndSelect (data) {
// Normalize the data object so we can use it for checks
var item = self._normalizeItem(data);
      // Check whether an option for this data object already exists;
      // if it doesn't, create one below before selecting it
var $existingOptions = self.$element.find('option').filter(function () {
return $(this).val() === item.id;
});
// If an existing option wasn't found for it, create the option
if (!$existingOptions.length) {
var $option = self.option(item);
$option.attr('data-select2-tag', true);
self._removeOldTags();
self.addOptions([$option]);
}
// Select the item, now that we know there is an option for it
select(item);
}
function select (data) {
self.trigger('select', {
data: data
});
}
params.term = params.term || '';
var tokenData = this.tokenizer(params, this.options, createAndSelect);
if (tokenData.term !== params.term) {
// Replace the search term if we have the search box
if (this.$search.length) {
this.$search.val(tokenData.term);
this.$search.trigger('focus');
}
params.term = tokenData.term;
}
decorated.call(this, params, callback);
};
Tokenizer.prototype.tokenizer = function (_, params, options, callback) {
var separators = options.get('tokenSeparators') || [];
var term = params.term;
var i = 0;
var createTag = this.createTag || function (params) {
return {
id: params.term,
text: params.term
};
};
while (i < term.length) {
var termChar = term[i];
if ($.inArray(termChar, separators) === -1) {
i++;
continue;
}
var part = term.substr(0, i);
var partParams = $.extend({}, params, {
term: part
});
var data = createTag(partParams);
if (data == null) {
i++;
continue;
}
callback(data);
// Reset the term to not include the tokenized portion
term = term.substr(i + 1) || '';
i = 0;
}
return {
term: term
};
};
return Tokenizer;
});
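/*
 * Usage sketch: the default tokenizer above splits the search term on
 * `tokenSeparators` and selects each completed token. Illustrative only,
 * not executed here.
 *
 *   $('#emails').select2({
 *     tags: true,
 *     tokenSeparators: [',', ' ']   // typing "a@x.com," selects "a@x.com"
 *   });
 */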
S2.define('select2/data/minimumInputLength',[
], function () {
function MinimumInputLength (decorated, $e, options) {
this.minimumInputLength = options.get('minimumInputLength');
decorated.call(this, $e, options);
}
MinimumInputLength.prototype.query = function (decorated, params, callback) {
params.term = params.term || '';
if (params.term.length < this.minimumInputLength) {
this.trigger('results:message', {
message: 'inputTooShort',
args: {
minimum: this.minimumInputLength,
input: params.term,
params: params
}
});
return;
}
decorated.call(this, params, callback);
};
return MinimumInputLength;
});
S2.define('select2/data/maximumInputLength',[
], function () {
function MaximumInputLength (decorated, $e, options) {
this.maximumInputLength = options.get('maximumInputLength');
decorated.call(this, $e, options);
}
MaximumInputLength.prototype.query = function (decorated, params, callback) {
params.term = params.term || '';
if (this.maximumInputLength > 0 &&
params.term.length > this.maximumInputLength) {
this.trigger('results:message', {
message: 'inputTooLong',
args: {
maximum: this.maximumInputLength,
input: params.term,
params: params
}
});
return;
}
decorated.call(this, params, callback);
};
return MaximumInputLength;
});
S2.define('select2/data/maximumSelectionLength',[
], function (){
function MaximumSelectionLength (decorated, $e, options) {
this.maximumSelectionLength = options.get('maximumSelectionLength');
decorated.call(this, $e, options);
}
MaximumSelectionLength.prototype.bind =
function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
container.on('select', function () {
self._checkIfMaximumSelected();
});
};
MaximumSelectionLength.prototype.query =
function (decorated, params, callback) {
var self = this;
this._checkIfMaximumSelected(function () {
decorated.call(self, params, callback);
});
};
MaximumSelectionLength.prototype._checkIfMaximumSelected =
function (_, successCallback) {
var self = this;
this.current(function (currentData) {
var count = currentData != null ? currentData.length : 0;
if (self.maximumSelectionLength > 0 &&
count >= self.maximumSelectionLength) {
self.trigger('results:message', {
message: 'maximumSelected',
args: {
maximum: self.maximumSelectionLength
}
});
return;
}
if (successCallback) {
successCallback();
}
});
};
return MaximumSelectionLength;
});
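/*
 * Usage sketch: the three limit decorators above are driven by plain
 * numeric options; zero (the default) disables each check. Illustrative
 * only, not executed here.
 *
 *   $('#genres').select2({
 *     minimumInputLength: 2,        // 'inputTooShort' below this
 *     maximumInputLength: 20,       // 'inputTooLong' above this
 *     maximumSelectionLength: 3     // 'maximumSelected' beyond this
 *   });
 */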
S2.define('select2/dropdown',[
'jquery',
'./utils'
], function ($, Utils) {
function Dropdown ($element, options) {
this.$element = $element;
this.options = options;
Dropdown.__super__.constructor.call(this);
}
Utils.Extend(Dropdown, Utils.Observable);
Dropdown.prototype.render = function () {
var $dropdown = $(
'<span class="select2-dropdown">' +
'<span class="select2-results"></span>' +
'</span>'
);
$dropdown.attr('dir', this.options.get('dir'));
this.$dropdown = $dropdown;
return $dropdown;
};
Dropdown.prototype.bind = function () {
// Should be implemented in subclasses
};
Dropdown.prototype.position = function ($dropdown, $container) {
// Should be implemented in subclasses
};
Dropdown.prototype.destroy = function () {
// Remove the dropdown from the DOM
this.$dropdown.remove();
};
return Dropdown;
});
S2.define('select2/dropdown/search',[
'jquery',
'../utils'
], function ($, Utils) {
function Search () { }
Search.prototype.render = function (decorated) {
var $rendered = decorated.call(this);
var $search = $(
'<span class="select2-search select2-search--dropdown">' +
'<input class="select2-search__field" type="search" tabindex="-1"' +
' autocomplete="off" autocorrect="off" autocapitalize="none"' +
' spellcheck="false" role="searchbox" aria-autocomplete="list" />' +
'</span>'
);
this.$searchContainer = $search;
this.$search = $search.find('input');
$rendered.prepend($search);
return $rendered;
};
Search.prototype.bind = function (decorated, container, $container) {
var self = this;
var resultsId = container.id + '-results';
decorated.call(this, container, $container);
this.$search.on('keydown', function (evt) {
self.trigger('keypress', evt);
self._keyUpPrevented = evt.isDefaultPrevented();
});
// Workaround for browsers which do not support the `input` event
// This will prevent double-triggering of events for browsers which support
// both the `keyup` and `input` events.
this.$search.on('input', function (evt) {
// Unbind the duplicated `keyup` event
$(this).off('keyup');
});
this.$search.on('keyup input', function (evt) {
self.handleSearch(evt);
});
container.on('open', function () {
self.$search.attr('tabindex', 0);
self.$search.attr('aria-controls', resultsId);
self.$search.trigger('focus');
window.setTimeout(function () {
self.$search.trigger('focus');
}, 0);
});
container.on('close', function () {
self.$search.attr('tabindex', -1);
self.$search.removeAttr('aria-controls');
self.$search.removeAttr('aria-activedescendant');
self.$search.val('');
self.$search.trigger('blur');
});
container.on('focus', function () {
if (!container.isOpen()) {
self.$search.trigger('focus');
}
});
container.on('results:all', function (params) {
if (params.query.term == null || params.query.term === '') {
var showSearch = self.showSearch(params);
if (showSearch) {
self.$searchContainer.removeClass('select2-search--hide');
} else {
self.$searchContainer.addClass('select2-search--hide');
}
}
});
container.on('results:focus', function (params) {
if (params.data._resultId) {
self.$search.attr('aria-activedescendant', params.data._resultId);
} else {
self.$search.removeAttr('aria-activedescendant');
}
});
};
Search.prototype.handleSearch = function (evt) {
if (!this._keyUpPrevented) {
var input = this.$search.val();
this.trigger('query', {
term: input
});
}
this._keyUpPrevented = false;
};
Search.prototype.showSearch = function (_, params) {
return true;
};
return Search;
});
S2.define('select2/dropdown/hidePlaceholder',[
], function () {
function HidePlaceholder (decorated, $element, options, dataAdapter) {
this.placeholder = this.normalizePlaceholder(options.get('placeholder'));
decorated.call(this, $element, options, dataAdapter);
}
HidePlaceholder.prototype.append = function (decorated, data) {
data.results = this.removePlaceholder(data.results);
decorated.call(this, data);
};
HidePlaceholder.prototype.normalizePlaceholder = function (_, placeholder) {
if (typeof placeholder === 'string') {
placeholder = {
id: '',
text: placeholder
};
}
return placeholder;
};
HidePlaceholder.prototype.removePlaceholder = function (_, data) {
var modifiedData = data.slice(0);
for (var d = data.length - 1; d >= 0; d--) {
var item = data[d];
if (this.placeholder.id === item.id) {
modifiedData.splice(d, 1);
}
}
return modifiedData;
};
return HidePlaceholder;
});
S2.define('select2/dropdown/infiniteScroll',[
'jquery'
], function ($) {
function InfiniteScroll (decorated, $element, options, dataAdapter) {
this.lastParams = {};
decorated.call(this, $element, options, dataAdapter);
this.$loadingMore = this.createLoadingMore();
this.loading = false;
}
InfiniteScroll.prototype.append = function (decorated, data) {
this.$loadingMore.remove();
this.loading = false;
decorated.call(this, data);
if (this.showLoadingMore(data)) {
this.$results.append(this.$loadingMore);
this.loadMoreIfNeeded();
}
};
InfiniteScroll.prototype.bind = function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
container.on('query', function (params) {
self.lastParams = params;
self.loading = true;
});
container.on('query:append', function (params) {
self.lastParams = params;
self.loading = true;
});
this.$results.on('scroll', this.loadMoreIfNeeded.bind(this));
};
InfiniteScroll.prototype.loadMoreIfNeeded = function () {
var isLoadMoreVisible = $.contains(
document.documentElement,
this.$loadingMore[0]
);
if (this.loading || !isLoadMoreVisible) {
return;
}
var currentOffset = this.$results.offset().top +
this.$results.outerHeight(false);
var loadingMoreOffset = this.$loadingMore.offset().top +
this.$loadingMore.outerHeight(false);
if (currentOffset + 50 >= loadingMoreOffset) {
this.loadMore();
}
};
InfiniteScroll.prototype.loadMore = function () {
this.loading = true;
var params = $.extend({}, {page: 1}, this.lastParams);
params.page++;
this.trigger('query:append', params);
};
InfiniteScroll.prototype.showLoadingMore = function (_, data) {
return data.pagination && data.pagination.more;
};
InfiniteScroll.prototype.createLoadingMore = function () {
var $option = $(
'<li ' +
'class="select2-results__option select2-results__option--load-more"' +
'role="option" aria-disabled="true"></li>'
);
var message = this.options.get('translations').get('loadingMore');
$option.html(message(this.lastParams));
return $option;
};
return InfiniteScroll;
});
S2.define('select2/dropdown/attachBody',[
'jquery',
'../utils'
], function ($, Utils) {
function AttachBody (decorated, $element, options) {
this.$dropdownParent = $(options.get('dropdownParent') || document.body);
decorated.call(this, $element, options);
}
AttachBody.prototype.bind = function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
container.on('open', function () {
self._showDropdown();
self._attachPositioningHandler(container);
// Must bind after the results handlers to ensure correct sizing
self._bindContainerResultHandlers(container);
});
container.on('close', function () {
self._hideDropdown();
self._detachPositioningHandler(container);
});
this.$dropdownContainer.on('mousedown', function (evt) {
evt.stopPropagation();
});
};
AttachBody.prototype.destroy = function (decorated) {
decorated.call(this);
this.$dropdownContainer.remove();
};
AttachBody.prototype.position = function (decorated, $dropdown, $container) {
// Clone all of the container classes
$dropdown.attr('class', $container.attr('class'));
$dropdown.removeClass('select2');
$dropdown.addClass('select2-container--open');
$dropdown.css({
position: 'absolute',
top: -999999
});
this.$container = $container;
};
AttachBody.prototype.render = function (decorated) {
var $container = $('<span></span>');
var $dropdown = decorated.call(this);
$container.append($dropdown);
this.$dropdownContainer = $container;
return $container;
};
AttachBody.prototype._hideDropdown = function (decorated) {
this.$dropdownContainer.detach();
};
AttachBody.prototype._bindContainerResultHandlers =
function (decorated, container) {
// These should only be bound once
if (this._containerResultsHandlersBound) {
return;
}
var self = this;
container.on('results:all', function () {
self._positionDropdown();
self._resizeDropdown();
});
container.on('results:append', function () {
self._positionDropdown();
self._resizeDropdown();
});
container.on('results:message', function () {
self._positionDropdown();
self._resizeDropdown();
});
container.on('select', function () {
self._positionDropdown();
self._resizeDropdown();
});
container.on('unselect', function () {
self._positionDropdown();
self._resizeDropdown();
});
this._containerResultsHandlersBound = true;
};
AttachBody.prototype._attachPositioningHandler =
function (decorated, container) {
var self = this;
var scrollEvent = 'scroll.select2.' + container.id;
var resizeEvent = 'resize.select2.' + container.id;
var orientationEvent = 'orientationchange.select2.' + container.id;
var $watchers = this.$container.parents().filter(Utils.hasScroll);
$watchers.each(function () {
Utils.StoreData(this, 'select2-scroll-position', {
x: $(this).scrollLeft(),
y: $(this).scrollTop()
});
});
$watchers.on(scrollEvent, function (ev) {
var position = Utils.GetData(this, 'select2-scroll-position');
$(this).scrollTop(position.y);
});
$(window).on(scrollEvent + ' ' + resizeEvent + ' ' + orientationEvent,
function (e) {
self._positionDropdown();
self._resizeDropdown();
});
};
AttachBody.prototype._detachPositioningHandler =
function (decorated, container) {
var scrollEvent = 'scroll.select2.' + container.id;
var resizeEvent = 'resize.select2.' + container.id;
var orientationEvent = 'orientationchange.select2.' + container.id;
var $watchers = this.$container.parents().filter(Utils.hasScroll);
$watchers.off(scrollEvent);
$(window).off(scrollEvent + ' ' + resizeEvent + ' ' + orientationEvent);
};
AttachBody.prototype._positionDropdown = function () {
var $window = $(window);
var isCurrentlyAbove = this.$dropdown.hasClass('select2-dropdown--above');
var isCurrentlyBelow = this.$dropdown.hasClass('select2-dropdown--below');
var newDirection = null;
var offset = this.$container.offset();
offset.bottom = offset.top + this.$container.outerHeight(false);
var container = {
height: this.$container.outerHeight(false)
};
container.top = offset.top;
container.bottom = offset.top + container.height;
var dropdown = {
height: this.$dropdown.outerHeight(false)
};
var viewport = {
top: $window.scrollTop(),
bottom: $window.scrollTop() + $window.height()
};
var enoughRoomAbove = viewport.top < (offset.top - dropdown.height);
var enoughRoomBelow = viewport.bottom > (offset.bottom + dropdown.height);
var css = {
left: offset.left,
top: container.bottom
};
// Determine what the parent element is to use for calculating the offset
var $offsetParent = this.$dropdownParent;
// For statically positioned elements, we need to get the element
// that is determining the offset
if ($offsetParent.css('position') === 'static') {
$offsetParent = $offsetParent.offsetParent();
}
var parentOffset = {
top: 0,
left: 0
};
if (
$.contains(document.body, $offsetParent[0]) ||
$offsetParent[0].isConnected
) {
parentOffset = $offsetParent.offset();
}
css.top -= parentOffset.top;
css.left -= parentOffset.left;
if (!isCurrentlyAbove && !isCurrentlyBelow) {
newDirection = 'below';
}
if (!enoughRoomBelow && enoughRoomAbove && !isCurrentlyAbove) {
newDirection = 'above';
} else if (!enoughRoomAbove && enoughRoomBelow && isCurrentlyAbove) {
newDirection = 'below';
}
if (newDirection == 'above' ||
(isCurrentlyAbove && newDirection !== 'below')) {
css.top = container.top - parentOffset.top - dropdown.height;
}
if (newDirection != null) {
this.$dropdown
.removeClass('select2-dropdown--below select2-dropdown--above')
.addClass('select2-dropdown--' + newDirection);
this.$container
.removeClass('select2-container--below select2-container--above')
.addClass('select2-container--' + newDirection);
}
this.$dropdownContainer.css(css);
};
AttachBody.prototype._resizeDropdown = function () {
var css = {
width: this.$container.outerWidth(false) + 'px'
};
if (this.options.get('dropdownAutoWidth')) {
css.minWidth = css.width;
css.position = 'relative';
css.width = 'auto';
}
this.$dropdown.css(css);
};
AttachBody.prototype._showDropdown = function (decorated) {
this.$dropdownContainer.appendTo(this.$dropdownParent);
this._positionDropdown();
this._resizeDropdown();
};
return AttachBody;
});
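/*
 * Usage sketch: AttachBody renders the dropdown into `dropdownParent`
 * (document.body by default) and repositions it on scroll/resize. When the
 * control sits inside a modal that traps focus, pointing `dropdownParent`
 * at the modal is the usual fix. Illustrative only, not executed here;
 * '#myModal' is a hypothetical container.
 *
 *   $('#author').select2({
 *     dropdownParent: $('#myModal')
 *   });
 */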
S2.define('select2/dropdown/minimumResultsForSearch',[
], function () {
function countResults (data) {
var count = 0;
for (var d = 0; d < data.length; d++) {
var item = data[d];
if (item.children) {
count += countResults(item.children);
} else {
count++;
}
}
return count;
}
function MinimumResultsForSearch (decorated, $element, options, dataAdapter) {
this.minimumResultsForSearch = options.get('minimumResultsForSearch');
if (this.minimumResultsForSearch < 0) {
this.minimumResultsForSearch = Infinity;
}
decorated.call(this, $element, options, dataAdapter);
}
MinimumResultsForSearch.prototype.showSearch = function (decorated, params) {
if (countResults(params.data.results) < this.minimumResultsForSearch) {
return false;
}
return decorated.call(this, params);
};
return MinimumResultsForSearch;
});
S2.define('select2/dropdown/selectOnClose',[
'../utils'
], function (Utils) {
function SelectOnClose () { }
SelectOnClose.prototype.bind = function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
container.on('close', function (params) {
self._handleSelectOnClose(params);
});
};
SelectOnClose.prototype._handleSelectOnClose = function (_, params) {
if (params && params.originalSelect2Event != null) {
var event = params.originalSelect2Event;
// Don't select an item if the close event was triggered from a select or
// unselect event
if (event._type === 'select' || event._type === 'unselect') {
return;
}
}
var $highlightedResults = this.getHighlightedResults();
// Only select highlighted results
if ($highlightedResults.length < 1) {
return;
}
var data = Utils.GetData($highlightedResults[0], 'data');
    // Don't re-select already selected results
if (
(data.element != null && data.element.selected) ||
(data.element == null && data.selected)
) {
return;
}
this.trigger('select', {
data: data
});
};
return SelectOnClose;
});
S2.define('select2/dropdown/closeOnSelect',[
], function () {
function CloseOnSelect () { }
CloseOnSelect.prototype.bind = function (decorated, container, $container) {
var self = this;
decorated.call(this, container, $container);
container.on('select', function (evt) {
self._selectTriggered(evt);
});
container.on('unselect', function (evt) {
self._selectTriggered(evt);
});
};
CloseOnSelect.prototype._selectTriggered = function (_, evt) {
var originalEvent = evt.originalEvent;
// Don't close if the control key is being held
if (originalEvent && (originalEvent.ctrlKey || originalEvent.metaKey)) {
return;
}
this.trigger('close', {
originalEvent: originalEvent,
originalSelect2Event: evt
});
};
return CloseOnSelect;
});
S2.define('select2/i18n/en',[],function () {
// English
return {
errorLoading: function () {
return 'The results could not be loaded.';
},
inputTooLong: function (args) {
var overChars = args.input.length - args.maximum;
var message = 'Please delete ' + overChars + ' character';
if (overChars != 1) {
message += 's';
}
return message;
},
inputTooShort: function (args) {
var remainingChars = args.minimum - args.input.length;
var message = 'Please enter ' + remainingChars + ' or more characters';
return message;
},
loadingMore: function () {
return 'Loading more results…';
},
maximumSelected: function (args) {
var message = 'You can only select ' + args.maximum + ' item';
if (args.maximum != 1) {
message += 's';
}
return message;
},
noResults: function () {
return 'No results found';
},
searching: function () {
return 'Searching…';
},
removeAllItems: function () {
return 'Remove all items';
}
};
});
S2.define('select2/defaults',[
'jquery',
'require',
'./results',
'./selection/single',
'./selection/multiple',
'./selection/placeholder',
'./selection/allowClear',
'./selection/search',
'./selection/eventRelay',
'./utils',
'./translation',
'./diacritics',
'./data/select',
'./data/array',
'./data/ajax',
'./data/tags',
'./data/tokenizer',
'./data/minimumInputLength',
'./data/maximumInputLength',
'./data/maximumSelectionLength',
'./dropdown',
'./dropdown/search',
'./dropdown/hidePlaceholder',
'./dropdown/infiniteScroll',
'./dropdown/attachBody',
'./dropdown/minimumResultsForSearch',
'./dropdown/selectOnClose',
'./dropdown/closeOnSelect',
'./i18n/en'
], function ($, require,
ResultsList,
SingleSelection, MultipleSelection, Placeholder, AllowClear,
SelectionSearch, EventRelay,
Utils, Translation, DIACRITICS,
SelectData, ArrayData, AjaxData, Tags, Tokenizer,
MinimumInputLength, MaximumInputLength, MaximumSelectionLength,
Dropdown, DropdownSearch, HidePlaceholder, InfiniteScroll,
AttachBody, MinimumResultsForSearch, SelectOnClose, CloseOnSelect,
EnglishTranslation) {
function Defaults () {
this.reset();
}
Defaults.prototype.apply = function (options) {
options = $.extend(true, {}, this.defaults, options);
if (options.dataAdapter == null) {
if (options.ajax != null) {
options.dataAdapter = AjaxData;
} else if (options.data != null) {
options.dataAdapter = ArrayData;
} else {
options.dataAdapter = SelectData;
}
if (options.minimumInputLength > 0) {
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
MinimumInputLength
);
}
if (options.maximumInputLength > 0) {
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
MaximumInputLength
);
}
if (options.maximumSelectionLength > 0) {
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
MaximumSelectionLength
);
}
if (options.tags) {
options.dataAdapter = Utils.Decorate(options.dataAdapter, Tags);
}
if (options.tokenSeparators != null || options.tokenizer != null) {
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
Tokenizer
);
}
if (options.query != null) {
var Query = require(options.amdBase + 'compat/query');
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
Query
);
}
if (options.initSelection != null) {
var InitSelection = require(options.amdBase + 'compat/initSelection');
options.dataAdapter = Utils.Decorate(
options.dataAdapter,
InitSelection
);
}
}
if (options.resultsAdapter == null) {
options.resultsAdapter = ResultsList;
if (options.ajax != null) {
options.resultsAdapter = Utils.Decorate(
options.resultsAdapter,
InfiniteScroll
);
}
if (options.placeholder != null) {
options.resultsAdapter = Utils.Decorate(
options.resultsAdapter,
HidePlaceholder
);
}
if (options.selectOnClose) {
options.resultsAdapter = Utils.Decorate(
options.resultsAdapter,
SelectOnClose
);
}
}
if (options.dropdownAdapter == null) {
if (options.multiple) {
options.dropdownAdapter = Dropdown;
} else {
var SearchableDropdown = Utils.Decorate(Dropdown, DropdownSearch);
options.dropdownAdapter = SearchableDropdown;
}
if (options.minimumResultsForSearch !== 0) {
options.dropdownAdapter = Utils.Decorate(
options.dropdownAdapter,
MinimumResultsForSearch
);
}
if (options.closeOnSelect) {
options.dropdownAdapter = Utils.Decorate(
options.dropdownAdapter,
CloseOnSelect
);
}
if (
options.dropdownCssClass != null ||
options.dropdownCss != null ||
options.adaptDropdownCssClass != null
) {
var DropdownCSS = require(options.amdBase + 'compat/dropdownCss');
options.dropdownAdapter = Utils.Decorate(
options.dropdownAdapter,
DropdownCSS
);
}
options.dropdownAdapter = Utils.Decorate(
options.dropdownAdapter,
AttachBody
);
}
if (options.selectionAdapter == null) {
if (options.multiple) {
options.selectionAdapter = MultipleSelection;
} else {
options.selectionAdapter = SingleSelection;
}
// Add the placeholder mixin if a placeholder was specified
if (options.placeholder != null) {
options.selectionAdapter = Utils.Decorate(
options.selectionAdapter,
Placeholder
);
}
if (options.allowClear) {
options.selectionAdapter = Utils.Decorate(
options.selectionAdapter,
AllowClear
);
}
if (options.multiple) {
options.selectionAdapter = Utils.Decorate(
options.selectionAdapter,
SelectionSearch
);
}
if (
options.containerCssClass != null ||
options.containerCss != null ||
options.adaptContainerCssClass != null
) {
var ContainerCSS = require(options.amdBase + 'compat/containerCss');
options.selectionAdapter = Utils.Decorate(
options.selectionAdapter,
ContainerCSS
);
}
options.selectionAdapter = Utils.Decorate(
options.selectionAdapter,
EventRelay
);
}
// If the defaults were not previously applied from an element, it is
// possible for the language option to have not been resolved
options.language = this._resolveLanguage(options.language);
// Always fall back to English since it will always be complete
options.language.push('en');
var uniqueLanguages = [];
for (var l = 0; l < options.language.length; l++) {
var language = options.language[l];
if (uniqueLanguages.indexOf(language) === -1) {
uniqueLanguages.push(language);
}
}
options.language = uniqueLanguages;
options.translations = this._processTranslations(
options.language,
options.debug
);
return options;
};
Defaults.prototype.reset = function () {
function stripDiacritics (text) {
// Used 'uni range + named function' from http://jsperf.com/diacritics/18
function match(a) {
return DIACRITICS[a] || a;
}
return text.replace(/[^\u0000-\u007E]/g, match);
}
function matcher (params, data) {
// Always return the object if there is nothing to compare
if ($.trim(params.term) === '') {
return data;
}
// Do a recursive check for options with children
if (data.children && data.children.length > 0) {
// Clone the data object if there are children
// This is required as we modify the object to remove any non-matches
var match = $.extend(true, {}, data);
// Check each child of the option
for (var c = data.children.length - 1; c >= 0; c--) {
var child = data.children[c];
var matches = matcher(params, child);
// If there wasn't a match, remove the object in the array
if (matches == null) {
match.children.splice(c, 1);
}
}
// If any children matched, return the new object
if (match.children.length > 0) {
return match;
}
// If there were no matching children, check just the plain object
return matcher(params, match);
}
var original = stripDiacritics(data.text).toUpperCase();
var term = stripDiacritics(params.term).toUpperCase();
// Check if the text contains the term
if (original.indexOf(term) > -1) {
return data;
}
// If it doesn't contain the term, don't return anything
return null;
}
this.defaults = {
amdBase: './',
amdLanguageBase: './i18n/',
closeOnSelect: true,
debug: false,
dropdownAutoWidth: false,
escapeMarkup: Utils.escapeMarkup,
language: {},
matcher: matcher,
minimumInputLength: 0,
maximumInputLength: 0,
maximumSelectionLength: 0,
minimumResultsForSearch: 0,
selectOnClose: false,
scrollAfterSelect: false,
sorter: function (data) {
return data;
},
templateResult: function (result) {
return result.text;
},
templateSelection: function (selection) {
return selection.text;
},
theme: 'default',
width: 'resolve'
};
};
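// Editor's note: a minimal, commented-out sketch of how the default
// `matcher` defined above behaves, assuming the usual Select2 shapes for
// `params` ({ term: '...' }) and `data` ({ text: '...', children: [...] }).
// The sample values are illustrative and not part of the upstream source.
//
//   var params = { term: 'pie' };
//   var group = { text: 'Desserts', children: [
//     { text: 'Apple pie' },
//     { text: 'Cheesecake' }
//   ] };
//   // matcher(params, group) would return a clone of `group` whose
//   // `children` array contains only { text: 'Apple pie' }; with
//   // params.term === '' the original `group` is returned unchanged.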
Defaults.prototype.applyFromElement = function (options, $element) {
var optionLanguage = options.language;
var defaultLanguage = this.defaults.language;
var elementLanguage = $element.prop('lang');
var parentLanguage = $element.closest('[lang]').prop('lang');
var languages = Array.prototype.concat.call(
this._resolveLanguage(elementLanguage),
this._resolveLanguage(optionLanguage),
this._resolveLanguage(defaultLanguage),
this._resolveLanguage(parentLanguage)
);
options.language = languages;
return options;
};
Defaults.prototype._resolveLanguage = function (language) {
if (!language) {
return [];
}
if ($.isEmptyObject(language)) {
return [];
}
if ($.isPlainObject(language)) {
return [language];
}
var languages;
if (!$.isArray(language)) {
languages = [language];
} else {
languages = language;
}
var resolvedLanguages = [];
for (var l = 0; l < languages.length; l++) {
resolvedLanguages.push(languages[l]);
if (typeof languages[l] === 'string' && languages[l].indexOf('-') > 0) {
// Extract the region information if it is included
var languageParts = languages[l].split('-');
var baseLanguage = languageParts[0];
resolvedLanguages.push(baseLanguage);
}
}
return resolvedLanguages;
};
Defaults.prototype._processTranslations = function (languages, debug) {
var translations = new Translation();
for (var l = 0; l < languages.length; l++) {
var languageData = new Translation();
var language = languages[l];
if (typeof language === 'string') {
try {
// Try to load it with the original name
languageData = Translation.loadPath(language);
} catch (e) {
try {
// If we couldn't load it, check if it wasn't the full path
language = this.defaults.amdLanguageBase + language;
languageData = Translation.loadPath(language);
} catch (ex) {
// The translation could not be loaded at all. Sometimes this is
// because of a configuration problem, other times this can be
// because of how Select2 helps load all possible translation files
if (debug && window.console && console.warn) {
console.warn(
'Select2: The language file for "' + language + '" could ' +
'not be automatically loaded. A fallback will be used instead.'
);
}
}
}
} else if ($.isPlainObject(language)) {
languageData = new Translation(language);
} else {
languageData = language;
}
translations.extend(languageData);
}
return translations;
};
Defaults.prototype.set = function (key, value) {
var camelKey = $.camelCase(key);
var data = {};
data[camelKey] = value;
var convertedData = Utils._convertData(data);
$.extend(true, this.defaults, convertedData);
};
var defaults = new Defaults();
return defaults;
});
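// Editor's note: a hedged usage sketch of the `set` method above. Keys are
// camelCased and `--`-separated keys end up as nested defaults (via
// Utils._convertData); for example, once the jQuery plugin defined later in
// this file has attached `defaults` to `$.fn.select2`, one might write
// (illustrative only):
//
//   $.fn.select2.defaults.set('minimum-input-length', 2); // -> minimumInputLength
//   $.fn.select2.defaults.set('ajax--cache', false);      // -> ajax.cache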
S2.define('select2/options',[
'require',
'jquery',
'./defaults',
'./utils'
], function (require, $, Defaults, Utils) {
function Options (options, $element) {
this.options = options;
if ($element != null) {
this.fromElement($element);
}
if ($element != null) {
this.options = Defaults.applyFromElement(this.options, $element);
}
this.options = Defaults.apply(this.options);
if ($element && $element.is('input')) {
var InputCompat = require(this.get('amdBase') + 'compat/inputData');
this.options.dataAdapter = Utils.Decorate(
this.options.dataAdapter,
InputCompat
);
}
}
Options.prototype.fromElement = function ($e) {
var excludedData = ['select2'];
if (this.options.multiple == null) {
this.options.multiple = $e.prop('multiple');
}
if (this.options.disabled == null) {
this.options.disabled = $e.prop('disabled');
}
if (this.options.dir == null) {
if ($e.prop('dir')) {
this.options.dir = $e.prop('dir');
} else if ($e.closest('[dir]').prop('dir')) {
this.options.dir = $e.closest('[dir]').prop('dir');
} else {
this.options.dir = 'ltr';
}
}
$e.prop('disabled', this.options.disabled);
$e.prop('multiple', this.options.multiple);
if (Utils.GetData($e[0], 'select2Tags')) {
if (this.options.debug && window.console && console.warn) {
console.warn(
'Select2: The `data-select2-tags` attribute has been changed to ' +
'use the `data-data` and `data-tags="true"` attributes and will be ' +
'removed in future versions of Select2.'
);
}
Utils.StoreData($e[0], 'data', Utils.GetData($e[0], 'select2Tags'));
Utils.StoreData($e[0], 'tags', true);
}
if (Utils.GetData($e[0], 'ajaxUrl')) {
if (this.options.debug && window.console && console.warn) {
console.warn(
'Select2: The `data-ajax-url` attribute has been changed to ' +
'`data-ajax--url` and support for the old attribute will be removed' +
' in future versions of Select2.'
);
}
$e.attr('ajax--url', Utils.GetData($e[0], 'ajaxUrl'));
Utils.StoreData($e[0], 'ajax-Url', Utils.GetData($e[0], 'ajaxUrl'));
}
var dataset = {};
function upperCaseLetter(_, letter) {
return letter.toUpperCase();
}
// Pre-load all of the attributes which are prefixed with `data-`
for (var attr = 0; attr < $e[0].attributes.length; attr++) {
var attributeName = $e[0].attributes[attr].name;
var prefix = 'data-';
if (attributeName.substr(0, prefix.length) == prefix) {
// Get the contents of the attribute after `data-`
var dataName = attributeName.substring(prefix.length);
// Get the data contents from the consistent source
// This is more than likely the jQuery data helper
var dataValue = Utils.GetData($e[0], dataName);
// camelCase the attribute name to match the spec
var camelDataName = dataName.replace(/-([a-z])/g, upperCaseLetter);
// Store the data attribute contents into the dataset
dataset[camelDataName] = dataValue;
}
}
// Prefer the element's `dataset` attribute if it exists
// jQuery 1.x does not correctly handle data attributes with multiple dashes
if ($.fn.jquery && $.fn.jquery.substr(0, 2) == '1.' && $e[0].dataset) {
dataset = $.extend(true, {}, $e[0].dataset, dataset);
}
// Prefer our internal data cache if it exists
var data = $.extend(true, {}, Utils.GetData($e[0]), dataset);
data = Utils._convertData(data);
for (var key in data) {
if ($.inArray(key, excludedData) > -1) {
continue;
}
if ($.isPlainObject(this.options[key])) {
$.extend(this.options[key], data[key]);
} else {
this.options[key] = data[key];
}
}
return this;
};
Options.prototype.get = function (key) {
return this.options[key];
};
Options.prototype.set = function (key, val) {
this.options[key] = val;
};
return Options;
});
S2.define('select2/core',[
'jquery',
'./options',
'./utils',
'./keys'
], function ($, Options, Utils, KEYS) {
var Select2 = function ($element, options) {
if (Utils.GetData($element[0], 'select2') != null) {
Utils.GetData($element[0], 'select2').destroy();
}
this.$element = $element;
this.id = this._generateId($element);
options = options || {};
this.options = new Options(options, $element);
Select2.__super__.constructor.call(this);
// Set up the tabindex
var tabindex = $element.attr('tabindex') || 0;
Utils.StoreData($element[0], 'old-tabindex', tabindex);
$element.attr('tabindex', '-1');
// Set up containers and adapters
var DataAdapter = this.options.get('dataAdapter');
this.dataAdapter = new DataAdapter($element, this.options);
var $container = this.render();
this._placeContainer($container);
var SelectionAdapter = this.options.get('selectionAdapter');
this.selection = new SelectionAdapter($element, this.options);
this.$selection = this.selection.render();
this.selection.position(this.$selection, $container);
var DropdownAdapter = this.options.get('dropdownAdapter');
this.dropdown = new DropdownAdapter($element, this.options);
this.$dropdown = this.dropdown.render();
this.dropdown.position(this.$dropdown, $container);
var ResultsAdapter = this.options.get('resultsAdapter');
this.results = new ResultsAdapter($element, this.options, this.dataAdapter);
this.$results = this.results.render();
this.results.position(this.$results, this.$dropdown);
// Bind events
var self = this;
// Bind the container to all of the adapters
this._bindAdapters();
// Register any DOM event handlers
this._registerDomEvents();
// Register any internal event handlers
this._registerDataEvents();
this._registerSelectionEvents();
this._registerDropdownEvents();
this._registerResultsEvents();
this._registerEvents();
// Set the initial state
this.dataAdapter.current(function (initialData) {
self.trigger('selection:update', {
data: initialData
});
});
// Hide the original select
$element.addClass('select2-hidden-accessible');
$element.attr('aria-hidden', 'true');
// Synchronize any monitored attributes
this._syncAttributes();
Utils.StoreData($element[0], 'select2', this);
// Ensure backwards compatibility with $element.data('select2').
$element.data('select2', this);
};
Utils.Extend(Select2, Utils.Observable);
Select2.prototype._generateId = function ($element) {
var id = '';
if ($element.attr('id') != null) {
id = $element.attr('id');
} else if ($element.attr('name') != null) {
id = $element.attr('name') + '-' + Utils.generateChars(2);
} else {
id = Utils.generateChars(4);
}
id = id.replace(/(:|\.|\[|\]|,)/g, '');
id = 'select2-' + id;
return id;
};
Select2.prototype._placeContainer = function ($container) {
$container.insertAfter(this.$element);
var width = this._resolveWidth(this.$element, this.options.get('width'));
if (width != null) {
$container.css('width', width);
}
};
Select2.prototype._resolveWidth = function ($element, method) {
var WIDTH = /^width:(([-+]?([0-9]*\.)?[0-9]+)(px|em|ex|%|in|cm|mm|pt|pc))/i;
if (method == 'resolve') {
var styleWidth = this._resolveWidth($element, 'style');
if (styleWidth != null) {
return styleWidth;
}
return this._resolveWidth($element, 'element');
}
if (method == 'element') {
var elementWidth = $element.outerWidth(false);
if (elementWidth <= 0) {
return 'auto';
}
return elementWidth + 'px';
}
if (method == 'style') {
var style = $element.attr('style');
if (typeof(style) !== 'string') {
return null;
}
var attrs = style.split(';');
for (var i = 0, l = attrs.length; i < l; i = i + 1) {
var attr = attrs[i].replace(/\s/g, '');
var matches = attr.match(WIDTH);
if (matches !== null && matches.length >= 1) {
return matches[1];
}
}
return null;
}
if (method == 'computedstyle') {
var computedStyle = window.getComputedStyle($element[0]);
return computedStyle.width;
}
return method;
};
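// Editor's note: illustrative (non-upstream) examples of how _resolveWidth
// behaves for a few `width` option values, assuming an element whose inline
// style is "width: 50%":
//
//   width: 'resolve'       -> '50%'   (inline style wins; element width is the fallback)
//   width: 'element'       -> the rendered outerWidth in px, or 'auto' if <= 0
//   width: 'style'         -> '50%'   (null when no width rule is present)
//   width: 'computedstyle' -> window.getComputedStyle(...).width
//   width: '300px'         -> '300px' (any other string is passed through unchanged)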
Select2.prototype._bindAdapters = function () {
this.dataAdapter.bind(this, this.$container);
this.selection.bind(this, this.$container);
this.dropdown.bind(this, this.$container);
this.results.bind(this, this.$container);
};
Select2.prototype._registerDomEvents = function () {
var self = this;
this.$element.on('change.select2', function () {
self.dataAdapter.current(function (data) {
self.trigger('selection:update', {
data: data
});
});
});
this.$element.on('focus.select2', function (evt) {
self.trigger('focus', evt);
});
this._syncA = Utils.bind(this._syncAttributes, this);
this._syncS = Utils.bind(this._syncSubtree, this);
if (this.$element[0].attachEvent) {
this.$element[0].attachEvent('onpropertychange', this._syncA);
}
var observer = window.MutationObserver ||
window.WebKitMutationObserver ||
window.MozMutationObserver
;
if (observer != null) {
this._observer = new observer(function (mutations) {
self._syncA();
self._syncS(null, mutations);
});
this._observer.observe(this.$element[0], {
attributes: true,
childList: true,
subtree: false
});
} else if (this.$element[0].addEventListener) {
this.$element[0].addEventListener(
'DOMAttrModified',
self._syncA,
false
);
this.$element[0].addEventListener(
'DOMNodeInserted',
self._syncS,
false
);
this.$element[0].addEventListener(
'DOMNodeRemoved',
self._syncS,
false
);
}
};
Select2.prototype._registerDataEvents = function () {
var self = this;
this.dataAdapter.on('*', function (name, params) {
self.trigger(name, params);
});
};
Select2.prototype._registerSelectionEvents = function () {
var self = this;
var nonRelayEvents = ['toggle', 'focus'];
this.selection.on('toggle', function () {
self.toggleDropdown();
});
this.selection.on('focus', function (params) {
self.focus(params);
});
this.selection.on('*', function (name, params) {
if ($.inArray(name, nonRelayEvents) !== -1) {
return;
}
self.trigger(name, params);
});
};
Select2.prototype._registerDropdownEvents = function () {
var self = this;
this.dropdown.on('*', function (name, params) {
self.trigger(name, params);
});
};
Select2.prototype._registerResultsEvents = function () {
var self = this;
this.results.on('*', function (name, params) {
self.trigger(name, params);
});
};
Select2.prototype._registerEvents = function () {
var self = this;
this.on('open', function () {
self.$container.addClass('select2-container--open');
});
this.on('close', function () {
self.$container.removeClass('select2-container--open');
});
this.on('enable', function () {
self.$container.removeClass('select2-container--disabled');
});
this.on('disable', function () {
self.$container.addClass('select2-container--disabled');
});
this.on('blur', function () {
self.$container.removeClass('select2-container--focus');
});
this.on('query', function (params) {
if (!self.isOpen()) {
self.trigger('open', {});
}
this.dataAdapter.query(params, function (data) {
self.trigger('results:all', {
data: data,
query: params
});
});
});
this.on('query:append', function (params) {
this.dataAdapter.query(params, function (data) {
self.trigger('results:append', {
data: data,
query: params
});
});
});
this.on('keypress', function (evt) {
var key = evt.which;
if (self.isOpen()) {
if (key === KEYS.ESC || key === KEYS.TAB ||
(key === KEYS.UP && evt.altKey)) {
self.close(evt);
evt.preventDefault();
} else if (key === KEYS.ENTER) {
self.trigger('results:select', {});
evt.preventDefault();
} else if ((key === KEYS.SPACE && evt.ctrlKey)) {
self.trigger('results:toggle', {});
evt.preventDefault();
} else if (key === KEYS.UP) {
self.trigger('results:previous', {});
evt.preventDefault();
} else if (key === KEYS.DOWN) {
self.trigger('results:next', {});
evt.preventDefault();
}
} else {
if (key === KEYS.ENTER || key === KEYS.SPACE ||
(key === KEYS.DOWN && evt.altKey)) {
self.open();
evt.preventDefault();
}
}
});
};
Select2.prototype._syncAttributes = function () {
this.options.set('disabled', this.$element.prop('disabled'));
if (this.isDisabled()) {
if (this.isOpen()) {
this.close();
}
this.trigger('disable', {});
} else {
this.trigger('enable', {});
}
};
Select2.prototype._isChangeMutation = function (evt, mutations) {
var changed = false;
var self = this;
// Ignore any mutation events raised for elements that aren't options or
// optgroups. This handles the case when the select element is destroyed
if (
evt && evt.target && (
evt.target.nodeName !== 'OPTION' && evt.target.nodeName !== 'OPTGROUP'
)
) {
return;
}
if (!mutations) {
// If mutation events aren't supported, then we can only assume that the
// change affected the selections
changed = true;
} else if (mutations.addedNodes && mutations.addedNodes.length > 0) {
for (var n = 0; n < mutations.addedNodes.length; n++) {
var node = mutations.addedNodes[n];
if (node.selected) {
changed = true;
}
}
} else if (mutations.removedNodes && mutations.removedNodes.length > 0) {
changed = true;
} else if ($.isArray(mutations)) {
$.each(mutations, function(evt, mutation) {
if (self._isChangeMutation(evt, mutation)) {
// We've found a change mutation.
// Let's escape from the loop and continue
changed = true;
return false;
}
});
}
return changed;
};
Select2.prototype._syncSubtree = function (evt, mutations) {
var changed = this._isChangeMutation(evt, mutations);
var self = this;
// Only re-pull the data if we think there is a change
if (changed) {
this.dataAdapter.current(function (currentData) {
self.trigger('selection:update', {
data: currentData
});
});
}
};
/**
* Override the trigger method to automatically trigger pre-events when
* there are events that can be prevented.
*/
Select2.prototype.trigger = function (name, args) {
var actualTrigger = Select2.__super__.trigger;
var preTriggerMap = {
'open': 'opening',
'close': 'closing',
'select': 'selecting',
'unselect': 'unselecting',
'clear': 'clearing'
};
if (args === undefined) {
args = {};
}
if (name in preTriggerMap) {
var preTriggerName = preTriggerMap[name];
var preTriggerArgs = {
prevented: false,
name: name,
args: args
};
actualTrigger.call(this, preTriggerName, preTriggerArgs);
if (preTriggerArgs.prevented) {
args.prevented = true;
return;
}
}
actualTrigger.call(this, name, args);
};
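// Editor's note: a commented-out sketch of the pre-event flow described
// above. Internally, trigger('select', args) first fires 'selecting'; a
// listener may mark the pre-event args as prevented to cancel the 'select'
// event. Through the EventRelay selection decorator these surface on the
// original element as jQuery events, so (illustrative only):
//
//   $('select').on('select2:selecting', function (evt) {
//     evt.preventDefault(); // suppresses the corresponding 'select2:select'
//   });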
Select2.prototype.toggleDropdown = function () {
if (this.isDisabled()) {
return;
}
if (this.isOpen()) {
this.close();
} else {
this.open();
}
};
Select2.prototype.open = function () {
if (this.isOpen()) {
return;
}
if (this.isDisabled()) {
return;
}
this.trigger('query', {});
};
Select2.prototype.close = function (evt) {
if (!this.isOpen()) {
return;
}
this.trigger('close', { originalEvent : evt });
};
/**
* Helper method to abstract the "enabled" (not "disabled") state of this
* object.
*
* @return {true} if the instance is not disabled.
* @return {false} if the instance is disabled.
*/
Select2.prototype.isEnabled = function () {
return !this.isDisabled();
};
/**
* Helper method to abstract the "disabled" state of this object.
*
* @return {true} if the disabled option is true.
* @return {false} if the disabled option is false.
*/
Select2.prototype.isDisabled = function () {
return this.options.get('disabled');
};
Select2.prototype.isOpen = function () {
return this.$container.hasClass('select2-container--open');
};
Select2.prototype.hasFocus = function () {
return this.$container.hasClass('select2-container--focus');
};
Select2.prototype.focus = function (data) {
// No need to re-trigger focus events if we are already focused
if (this.hasFocus()) {
return;
}
this.$container.addClass('select2-container--focus');
this.trigger('focus', {});
};
Select2.prototype.enable = function (args) {
if (this.options.get('debug') && window.console && console.warn) {
console.warn(
'Select2: The `select2("enable")` method has been deprecated and will' +
' be removed in later Select2 versions. Use $element.prop("disabled")' +
' instead.'
);
}
if (args == null || args.length === 0) {
args = [true];
}
var disabled = !args[0];
this.$element.prop('disabled', disabled);
};
Select2.prototype.data = function () {
if (this.options.get('debug') &&
arguments.length > 0 && window.console && console.warn) {
console.warn(
'Select2: Data can no longer be set using `select2("data")`. You ' +
'should consider setting the value instead using `$element.val()`.'
);
}
var data = [];
this.dataAdapter.current(function (currentData) {
data = currentData;
});
return data;
};
Select2.prototype.val = function (args) {
if (this.options.get('debug') && window.console && console.warn) {
console.warn(
'Select2: The `select2("val")` method has been deprecated and will be' +
' removed in later Select2 versions. Use $element.val() instead.'
);
}
if (args == null || args.length === 0) {
return this.$element.val();
}
var newVal = args[0];
if ($.isArray(newVal)) {
newVal = $.map(newVal, function (obj) {
return obj.toString();
});
}
this.$element.val(newVal).trigger('input').trigger('change');
};
Select2.prototype.destroy = function () {
this.$container.remove();
if (this.$element[0].detachEvent) {
this.$element[0].detachEvent('onpropertychange', this._syncA);
}
if (this._observer != null) {
this._observer.disconnect();
this._observer = null;
} else if (this.$element[0].removeEventListener) {
this.$element[0]
.removeEventListener('DOMAttrModified', this._syncA, false);
this.$element[0]
.removeEventListener('DOMNodeInserted', this._syncS, false);
this.$element[0]
.removeEventListener('DOMNodeRemoved', this._syncS, false);
}
this._syncA = null;
this._syncS = null;
this.$element.off('.select2');
this.$element.attr('tabindex',
Utils.GetData(this.$element[0], 'old-tabindex'));
this.$element.removeClass('select2-hidden-accessible');
this.$element.attr('aria-hidden', 'false');
Utils.RemoveData(this.$element[0]);
this.$element.removeData('select2');
this.dataAdapter.destroy();
this.selection.destroy();
this.dropdown.destroy();
this.results.destroy();
this.dataAdapter = null;
this.selection = null;
this.dropdown = null;
this.results = null;
};
Select2.prototype.render = function () {
var $container = $(
'<span class="select2 select2-container">' +
'<span class="selection"></span>' +
'<span class="dropdown-wrapper" aria-hidden="true"></span>' +
'</span>'
);
$container.attr('dir', this.options.get('dir'));
this.$container = $container;
this.$container.addClass('select2-container--' + this.options.get('theme'));
Utils.StoreData($container[0], 'element', this.$element);
return $container;
};
return Select2;
});
S2.define('select2/compat/utils',[
'jquery'
], function ($) {
function syncCssClasses ($dest, $src, adapter) {
var classes, replacements = [], adapted;
classes = $.trim($dest.attr('class'));
if (classes) {
classes = '' + classes; // for IE which returns object
$(classes.split(/\s+/)).each(function () {
// Save all Select2 classes
if (this.indexOf('select2-') === 0) {
replacements.push(this);
}
});
}
classes = $.trim($src.attr('class'));
if (classes) {
classes = '' + classes; // for IE which returns object
$(classes.split(/\s+/)).each(function () {
// Only adapt non-Select2 classes
if (this.indexOf('select2-') !== 0) {
adapted = adapter(this);
if (adapted != null) {
replacements.push(adapted);
}
}
});
}
$dest.attr('class', replacements.join(' '));
}
return {
syncCssClasses: syncCssClasses
};
});
S2.define('select2/compat/containerCss',[
'jquery',
'./utils'
], function ($, CompatUtils) {
// No-op CSS adapter that discards all classes by default
function _containerAdapter (clazz) {
return null;
}
function ContainerCSS () { }
ContainerCSS.prototype.render = function (decorated) {
var $container = decorated.call(this);
var containerCssClass = this.options.get('containerCssClass') || '';
if ($.isFunction(containerCssClass)) {
containerCssClass = containerCssClass(this.$element);
}
var containerCssAdapter = this.options.get('adaptContainerCssClass');
containerCssAdapter = containerCssAdapter || _containerAdapter;
if (containerCssClass.indexOf(':all:') !== -1) {
containerCssClass = containerCssClass.replace(':all:', '');
var _cssAdapter = containerCssAdapter;
containerCssAdapter = function (clazz) {
var adapted = _cssAdapter(clazz);
if (adapted != null) {
// Append the old one along with the adapted one
return adapted + ' ' + clazz;
}
return clazz;
};
}
var containerCss = this.options.get('containerCss') || {};
if ($.isFunction(containerCss)) {
containerCss = containerCss(this.$element);
}
CompatUtils.syncCssClasses($container, this.$element, containerCssAdapter);
$container.css(containerCss);
$container.addClass(containerCssClass);
return $container;
};
return ContainerCSS;
});
S2.define('select2/compat/dropdownCss',[
'jquery',
'./utils'
], function ($, CompatUtils) {
// No-op CSS adapter that discards all classes by default
function _dropdownAdapter (clazz) {
return null;
}
function DropdownCSS () { }
DropdownCSS.prototype.render = function (decorated) {
var $dropdown = decorated.call(this);
var dropdownCssClass = this.options.get('dropdownCssClass') || '';
if ($.isFunction(dropdownCssClass)) {
dropdownCssClass = dropdownCssClass(this.$element);
}
var dropdownCssAdapter = this.options.get('adaptDropdownCssClass');
dropdownCssAdapter = dropdownCssAdapter || _dropdownAdapter;
if (dropdownCssClass.indexOf(':all:') !== -1) {
dropdownCssClass = dropdownCssClass.replace(':all:', '');
var _cssAdapter = dropdownCssAdapter;
dropdownCssAdapter = function (clazz) {
var adapted = _cssAdapter(clazz);
if (adapted != null) {
// Append the old one along with the adapted one
return adapted + ' ' + clazz;
}
return clazz;
};
}
var dropdownCss = this.options.get('dropdownCss') || {};
if ($.isFunction(dropdownCss)) {
dropdownCss = dropdownCss(this.$element);
}
CompatUtils.syncCssClasses($dropdown, this.$element, dropdownCssAdapter);
$dropdown.css(dropdownCss);
$dropdown.addClass(dropdownCssClass);
return $dropdown;
};
return DropdownCSS;
});
S2.define('select2/compat/initSelection',[
'jquery'
], function ($) {
function InitSelection (decorated, $element, options) {
if (options.get('debug') && window.console && console.warn) {
console.warn(
'Select2: The `initSelection` option has been deprecated in favor' +
' of a custom data adapter that overrides the `current` method. ' +
'This method is now called multiple times instead of a single ' +
'time when the instance is initialized. Support will be removed ' +
'for the `initSelection` option in future versions of Select2'
);
}
this.initSelection = options.get('initSelection');
this._isInitialized = false;
decorated.call(this, $element, options);
}
InitSelection.prototype.current = function (decorated, callback) {
var self = this;
if (this._isInitialized) {
decorated.call(this, callback);
return;
}
this.initSelection.call(null, this.$element, function (data) {
self._isInitialized = true;
if (!$.isArray(data)) {
data = [data];
}
callback(data);
});
};
return InitSelection;
});
S2.define('select2/compat/inputData',[
'jquery',
'../utils'
], function ($, Utils) {
function InputData (decorated, $element, options) {
this._currentData = [];
this._valueSeparator = options.get('valueSeparator') || ',';
if ($element.prop('type') === 'hidden') {
if (options.get('debug') && console && console.warn) {
console.warn(
'Select2: Using a hidden input with Select2 is no longer ' +
'supported and may stop working in the future. It is recommended ' +
'to use a `<select>` element instead.'
);
}
}
decorated.call(this, $element, options);
}
InputData.prototype.current = function (_, callback) {
function getSelected (data, selectedIds) {
var selected = [];
if (data.selected || $.inArray(data.id, selectedIds) !== -1) {
data.selected = true;
selected.push(data);
} else {
data.selected = false;
}
if (data.children) {
selected.push.apply(selected, getSelected(data.children, selectedIds));
}
return selected;
}
var selected = [];
for (var d = 0; d < this._currentData.length; d++) {
var data = this._currentData[d];
selected.push.apply(
selected,
getSelected(
data,
this.$element.val().split(
this._valueSeparator
)
)
);
}
callback(selected);
};
InputData.prototype.select = function (_, data) {
if (!this.options.get('multiple')) {
this.current(function (allData) {
$.map(allData, function (data) {
data.selected = false;
});
});
this.$element.val(data.id);
this.$element.trigger('input').trigger('change');
} else {
var value = this.$element.val();
value += this._valueSeparator + data.id;
this.$element.val(value);
this.$element.trigger('input').trigger('change');
}
};
InputData.prototype.unselect = function (_, data) {
var self = this;
data.selected = false;
this.current(function (allData) {
var values = [];
for (var d = 0; d < allData.length; d++) {
var item = allData[d];
if (data.id == item.id) {
continue;
}
values.push(item.id);
}
self.$element.val(values.join(self._valueSeparator));
self.$element.trigger('input').trigger('change');
});
};
InputData.prototype.query = function (_, params, callback) {
var results = [];
for (var d = 0; d < this._currentData.length; d++) {
var data = this._currentData[d];
var matches = this.matches(params, data);
if (matches !== null) {
results.push(matches);
}
}
callback({
results: results
});
};
InputData.prototype.addOptions = function (_, $options) {
var options = $.map($options, function ($option) {
return Utils.GetData($option[0], 'data');
});
this._currentData.push.apply(this._currentData, options);
};
return InputData;
});
S2.define('select2/compat/matcher',[
'jquery'
], function ($) {
function oldMatcher (matcher) {
function wrappedMatcher (params, data) {
var match = $.extend(true, {}, data);
if (params.term == null || $.trim(params.term) === '') {
return match;
}
if (data.children) {
for (var c = data.children.length - 1; c >= 0; c--) {
var child = data.children[c];
// Check if the child object matches
// The old matcher returned a boolean true or false
var doesMatch = matcher(params.term, child.text, child);
// If the child didn't match, pop it off
if (!doesMatch) {
match.children.splice(c, 1);
}
}
if (match.children.length > 0) {
return match;
}
}
if (matcher(params.term, data.text, data)) {
return match;
}
return null;
}
return wrappedMatcher;
}
return oldMatcher;
});
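// Editor's note: illustrative (non-upstream) usage of the oldMatcher wrapper
// above, which adapts a legacy boolean matcher(term, text, option) to the
// current (params, data) signature:
//
//   var legacy = function (term, text) {
//     return text.toUpperCase().indexOf(term.toUpperCase()) > -1;
//   };
//   // Pass the wrapped matcher when initializing, e.g. { matcher: oldMatcher(legacy) };
//   // this module is normally obtained via
//   // jQuery.fn.select2.amd.require('select2/compat/matcher').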
S2.define('select2/compat/query',[
], function () {
function Query (decorated, $element, options) {
if (options.get('debug') && window.console && console.warn) {
console.warn(
'Select2: The `query` option has been deprecated in favor of a ' +
'custom data adapter that overrides the `query` method. Support ' +
'will be removed for the `query` option in future versions of ' +
'Select2.'
);
}
decorated.call(this, $element, options);
}
Query.prototype.query = function (_, params, callback) {
params.callback = callback;
var query = this.options.get('query');
query.call(null, params);
};
return Query;
});
S2.define('select2/dropdown/attachContainer',[
], function () {
function AttachContainer (decorated, $element, options) {
decorated.call(this, $element, options);
}
AttachContainer.prototype.position =
function (decorated, $dropdown, $container) {
var $dropdownContainer = $container.find('.dropdown-wrapper');
$dropdownContainer.append($dropdown);
$dropdown.addClass('select2-dropdown--below');
$container.addClass('select2-container--below');
};
return AttachContainer;
});
S2.define('select2/dropdown/stopPropagation',[
], function () {
function StopPropagation () { }
StopPropagation.prototype.bind = function (decorated, container, $container) {
decorated.call(this, container, $container);
var stoppedEvents = [
'blur',
'change',
'click',
'dblclick',
'focus',
'focusin',
'focusout',
'input',
'keydown',
'keyup',
'keypress',
'mousedown',
'mouseenter',
'mouseleave',
'mousemove',
'mouseover',
'mouseup',
'search',
'touchend',
'touchstart'
];
this.$dropdown.on(stoppedEvents.join(' '), function (evt) {
evt.stopPropagation();
});
};
return StopPropagation;
});
S2.define('select2/selection/stopPropagation',[
], function () {
function StopPropagation () { }
StopPropagation.prototype.bind = function (decorated, container, $container) {
decorated.call(this, container, $container);
var stoppedEvents = [
'blur',
'change',
'click',
'dblclick',
'focus',
'focusin',
'focusout',
'input',
'keydown',
'keyup',
'keypress',
'mousedown',
'mouseenter',
'mouseleave',
'mousemove',
'mouseover',
'mouseup',
'search',
'touchend',
'touchstart'
];
this.$selection.on(stoppedEvents.join(' '), function (evt) {
evt.stopPropagation();
});
};
return StopPropagation;
});
/*!
* jQuery Mousewheel 3.1.13
*
* Copyright jQuery Foundation and other contributors
* Released under the MIT license
* http://jquery.org/license
*/
(function (factory) {
if ( typeof S2.define === 'function' && S2.define.amd ) {
// AMD. Register as an anonymous module.
S2.define('jquery-mousewheel',['jquery'], factory);
} else if (typeof exports === 'object') {
// Node/CommonJS style for Browserify
module.exports = factory;
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
var toFix = ['wheel', 'mousewheel', 'DOMMouseScroll', 'MozMousePixelScroll'],
toBind = ( 'onwheel' in document || document.documentMode >= 9 ) ?
['wheel'] : ['mousewheel', 'DomMouseScroll', 'MozMousePixelScroll'],
slice = Array.prototype.slice,
nullLowestDeltaTimeout, lowestDelta;
if ( $.event.fixHooks ) {
for ( var i = toFix.length; i; ) {
$.event.fixHooks[ toFix[--i] ] = $.event.mouseHooks;
}
}
var special = $.event.special.mousewheel = {
version: '3.1.12',
setup: function() {
if ( this.addEventListener ) {
for ( var i = toBind.length; i; ) {
this.addEventListener( toBind[--i], handler, false );
}
} else {
this.onmousewheel = handler;
}
// Store the line height and page height for this particular element
$.data(this, 'mousewheel-line-height', special.getLineHeight(this));
$.data(this, 'mousewheel-page-height', special.getPageHeight(this));
},
teardown: function() {
if ( this.removeEventListener ) {
for ( var i = toBind.length; i; ) {
this.removeEventListener( toBind[--i], handler, false );
}
} else {
this.onmousewheel = null;
}
// Clean up the data we added to the element
$.removeData(this, 'mousewheel-line-height');
$.removeData(this, 'mousewheel-page-height');
},
getLineHeight: function(elem) {
var $elem = $(elem),
$parent = $elem['offsetParent' in $.fn ? 'offsetParent' : 'parent']();
if (!$parent.length) {
$parent = $('body');
}
return parseInt($parent.css('fontSize'), 10) || parseInt($elem.css('fontSize'), 10) || 16;
},
getPageHeight: function(elem) {
return $(elem).height();
},
settings: {
adjustOldDeltas: true, // see shouldAdjustOldDeltas() below
normalizeOffset: true // calls getBoundingClientRect for each event
}
};
$.fn.extend({
mousewheel: function(fn) {
return fn ? this.bind('mousewheel', fn) : this.trigger('mousewheel');
},
unmousewheel: function(fn) {
return this.unbind('mousewheel', fn);
}
});
function handler(event) {
var orgEvent = event || window.event,
args = slice.call(arguments, 1),
delta = 0,
deltaX = 0,
deltaY = 0,
absDelta = 0,
offsetX = 0,
offsetY = 0;
event = $.event.fix(orgEvent);
event.type = 'mousewheel';
// Old school scrollwheel delta
if ( 'detail' in orgEvent ) { deltaY = orgEvent.detail * -1; }
if ( 'wheelDelta' in orgEvent ) { deltaY = orgEvent.wheelDelta; }
if ( 'wheelDeltaY' in orgEvent ) { deltaY = orgEvent.wheelDeltaY; }
if ( 'wheelDeltaX' in orgEvent ) { deltaX = orgEvent.wheelDeltaX * -1; }
// Firefox < 17 horizontal scrolling related to DOMMouseScroll event
if ( 'axis' in orgEvent && orgEvent.axis === orgEvent.HORIZONTAL_AXIS ) {
deltaX = deltaY * -1;
deltaY = 0;
}
// Set delta to be deltaY or deltaX if deltaY is 0 for backwards compatibility
delta = deltaY === 0 ? deltaX : deltaY;
// New school wheel delta (wheel event)
if ( 'deltaY' in orgEvent ) {
deltaY = orgEvent.deltaY * -1;
delta = deltaY;
}
if ( 'deltaX' in orgEvent ) {
deltaX = orgEvent.deltaX;
if ( deltaY === 0 ) { delta = deltaX * -1; }
}
// No change actually happened, no reason to go any further
if ( deltaY === 0 && deltaX === 0 ) { return; }
// Need to convert lines and pages to pixels if we aren't already in pixels
// There are three delta modes:
// * deltaMode 0 is by pixels, nothing to do
// * deltaMode 1 is by lines
// * deltaMode 2 is by pages
if ( orgEvent.deltaMode === 1 ) {
var lineHeight = $.data(this, 'mousewheel-line-height');
delta *= lineHeight;
deltaY *= lineHeight;
deltaX *= lineHeight;
} else if ( orgEvent.deltaMode === 2 ) {
var pageHeight = $.data(this, 'mousewheel-page-height');
delta *= pageHeight;
deltaY *= pageHeight;
deltaX *= pageHeight;
}
// Store lowest absolute delta to normalize the delta values
absDelta = Math.max( Math.abs(deltaY), Math.abs(deltaX) );
if ( !lowestDelta || absDelta < lowestDelta ) {
lowestDelta = absDelta;
// Adjust older deltas if necessary
if ( shouldAdjustOldDeltas(orgEvent, absDelta) ) {
lowestDelta /= 40;
}
}
// Adjust older deltas if necessary
if ( shouldAdjustOldDeltas(orgEvent, absDelta) ) {
// Divide all the things by 40!
delta /= 40;
deltaX /= 40;
deltaY /= 40;
}
// Get a whole, normalized value for the deltas
delta = Math[ delta >= 1 ? 'floor' : 'ceil' ](delta / lowestDelta);
deltaX = Math[ deltaX >= 1 ? 'floor' : 'ceil' ](deltaX / lowestDelta);
deltaY = Math[ deltaY >= 1 ? 'floor' : 'ceil' ](deltaY / lowestDelta);
// Normalise offsetX and offsetY properties
if ( special.settings.normalizeOffset && this.getBoundingClientRect ) {
var boundingRect = this.getBoundingClientRect();
offsetX = event.clientX - boundingRect.left;
offsetY = event.clientY - boundingRect.top;
}
// Add information to the event object
event.deltaX = deltaX;
event.deltaY = deltaY;
event.deltaFactor = lowestDelta;
event.offsetX = offsetX;
event.offsetY = offsetY;
// Go ahead and set deltaMode to 0 since we converted to pixels
// Although this is a little odd since we overwrite the deltaX/Y
// properties with normalized deltas.
event.deltaMode = 0;
// Add event and delta to the front of the arguments
args.unshift(event, delta, deltaX, deltaY);
// Clear out lowestDelta after some time to better
// handle multiple device types that give
// a different lowestDelta
// Ex: trackpad = 3 and mouse wheel = 120
if (nullLowestDeltaTimeout) { clearTimeout(nullLowestDeltaTimeout); }
nullLowestDeltaTimeout = setTimeout(nullLowestDelta, 200);
return ($.event.dispatch || $.event.handle).apply(this, args);
}
function nullLowestDelta() {
lowestDelta = null;
}
function shouldAdjustOldDeltas(orgEvent, absDelta) {
// If this is an older event and the delta is divisible by 120,
// then we are assuming that the browser is treating this as an
// older mouse wheel event and that we should divide the deltas
// by 40 to try and get a more usable deltaFactor.
// Side note, this actually impacts the reported scroll distance
// in older browsers and can cause scrolling to be slower than native.
// Turn this off by setting $.event.special.mousewheel.settings.adjustOldDeltas to false.
return special.settings.adjustOldDeltas && orgEvent.type === 'mousewheel' && absDelta % 120 === 0;
}
}));
S2.define('jquery.select2',[
'jquery',
'jquery-mousewheel',
'./select2/core',
'./select2/defaults',
'./select2/utils'
], function ($, _, Select2, Defaults, Utils) {
if ($.fn.select2 == null) {
// All methods that should return the element
var thisMethods = ['open', 'close', 'destroy'];
$.fn.select2 = function (options) {
options = options || {};
if (typeof options === 'object') {
this.each(function () {
var instanceOptions = $.extend(true, {}, options);
var instance = new Select2($(this), instanceOptions);
});
return this;
} else if (typeof options === 'string') {
var ret;
var args = Array.prototype.slice.call(arguments, 1);
this.each(function () {
var instance = Utils.GetData(this, 'select2');
if (instance == null && window.console && console.error) {
console.error(
'The select2(\'' + options + '\') method was called on an ' +
'element that is not using Select2.'
);
}
ret = instance[options].apply(instance, args);
});
// Check if we should be returning `this`
if ($.inArray(options, thisMethods) > -1) {
return this;
}
return ret;
} else {
throw new Error('Invalid arguments for Select2: ' + options);
}
};
}
if ($.fn.select2.defaults == null) {
$.fn.select2.defaults = Defaults;
}
return Select2;
});
// Return the AMD loader configuration so it can be used outside of this file
return {
define: S2.define,
require: S2.require
};
}());
// Autoload the jQuery bindings
// We know that all of the modules exist above this, so we're safe
var select2 = S2.require('jquery.select2');
// Hold the AMD module references on the jQuery function that was just loaded
// This allows Select2 to use the internal loader outside of this file, such
// as in the language files.
jQuery.fn.select2.amd = S2;
// Return the Select2 instance for anyone who is importing it.
return select2;
}));
| Django-locallibrary/LocalLibrary/staticfiles/admin/js/vendor/select2/select2.full.c2afdeda3058.js/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/vendor/select2/select2.full.c2afdeda3058.js",
"repo_id": "Django-locallibrary",
"token_count": 75074
} | 8 |
"""Logic that powers autocompletion installed by ``pip completion``.
"""
import optparse
import os
import sys
from itertools import chain
from pip._internal.cli.main_parser import create_main_parser
from pip._internal.commands import commands_dict, create_command
from pip._internal.utils.misc import get_installed_distributions
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Iterable, List, Optional
def autocomplete():
# type: () -> None
"""Entry Point for completion of main and subcommand options.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
parser = create_main_parser()
subcommands = list(commands_dict)
options = []
# subcommand
subcommand_name = None # type: Optional[str]
for word in cwords:
if word in subcommands:
subcommand_name = word
break
# subcommand options
if subcommand_name is not None:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for show and uninstall
should_list_installed = (
subcommand_name in ['show', 'uninstall'] and
not current.startswith('-')
)
if should_list_installed:
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = create_command(subcommand_name)
for opt in subcommand.parser.option_list_all:
if opt.help != optparse.SUPPRESS_HELP:
for opt_str in opt._long_opts + opt._short_opts:
options.append((opt_str, opt.nargs))
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
# get completion type given cwords and available subcommand options
completion_type = get_path_completion_type(
cwords, cword, subcommand.parser.option_list_all,
)
# get completion files and directories if ``completion_type`` is
# ``<file>``, ``<dir>`` or ``<path>``
if completion_type:
paths = auto_complete_paths(current, completion_type)
options = [(path, 0) for path in paths]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1] and option[0][:2] == "--":
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
flattened_opts = chain.from_iterable(opts)
if current.startswith('-'):
for opt in flattened_opts:
if opt.help != optparse.SUPPRESS_HELP:
subcommands += opt._long_opts + opt._short_opts
else:
# get completion type given cwords and all available options
completion_type = get_path_completion_type(cwords, cword,
flattened_opts)
if completion_type:
subcommands = list(auto_complete_paths(current,
completion_type))
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
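# Editor's note: an illustrative sketch (not part of pip) of the environment
# this entry point expects, as wired up by the shell snippet that
# ``pip completion`` emits; the sample values are assumptions:
#
#   os.environ['PIP_AUTO_COMPLETE'] = '1'
#   os.environ['COMP_WORDS'] = 'pip install --n'   # the command line typed so far
#   os.environ['COMP_CWORD'] = '2'                 # index of the word being completed
#   # autocomplete() would then print matching options such as
#   # '--no-deps', '--no-cache-dir', ... and leave via sys.exit(1).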
def get_path_completion_type(cwords, cword, opts):
# type: (List[str], int, Iterable[Any]) -> Optional[str]
"""Get the type of path completion (``file``, ``dir``, ``path`` or None)
:param cwords: same as the environment variable ``COMP_WORDS``
:param cword: same as the environment variable ``COMP_CWORD``
:param opts: The available options to check
:return: path completion type (``file``, ``dir``, ``path`` or None)
"""
if cword < 2 or not cwords[cword - 2].startswith('-'):
return None
for opt in opts:
if opt.help == optparse.SUPPRESS_HELP:
continue
for o in str(opt).split('/'):
if cwords[cword - 2].split('=')[0] == o:
if not opt.metavar or any(
x in ('path', 'file', 'dir')
for x in opt.metavar.split('/')):
return opt.metavar
return None
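# Editor's note: an illustrative (assumed) example of the helper above. With
# cwords = ['install', '--log'] and cword = 3 (the word under the cursor is
# still empty), cwords[cword - 2] is '--log', whose option metavar is 'path',
# so the function returns 'path'; when the preceding word is not an option,
# it returns None.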
def auto_complete_paths(current, completion_type):
# type: (str, str) -> Iterable[str]
"""If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
:param completion_type: path completion type (``file``, ``path`` or ``dir``)
:return: A generator of regular files and/or directories
"""
directory, filename = os.path.split(current)
current_path = os.path.abspath(directory)
# Don't complete paths if they can't be accessed
if not os.access(current_path, os.R_OK):
return
filename = os.path.normcase(filename)
# list all files that start with ``filename``
file_list = (x for x in os.listdir(current_path)
if os.path.normcase(x).startswith(filename))
for f in file_list:
opt = os.path.join(current_path, f)
comp_file = os.path.normcase(os.path.join(directory, f))
# complete regular files when there is no ``<dir>`` after the option
# complete directories when there is ``<file>``, ``<path>`` or
# ``<dir>`` after the option
if completion_type != 'dir' and os.path.isfile(opt):
yield comp_file
elif os.path.isdir(opt):
yield os.path.join(comp_file, '')
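# Editor's note: an illustrative (assumed) usage of the generator above; the
# directory layout is hypothetical:
#
#   sorted(auto_complete_paths('./re', 'file'))
#   # -> ['./readme.txt', './requirements/'] given a file readme.txt and a
#   #    directory requirements/ in the current directory; with
#   #    completion_type='dir' only './requirements/' would be yielded.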
| Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/autocompletion.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/autocompletion.py",
"repo_id": "Django-locallibrary",
"token_count": 2833
} | 9 |
from __future__ import absolute_import
import locale
import logging
import os
import sys
import pip._vendor
from pip._vendor import pkg_resources
from pip._vendor.certifi import where
from pip import __file__ as pip_location
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import get_pip_version
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import ModuleType
from typing import List, Optional, Dict
from optparse import Values
from pip._internal.configuration import Configuration
logger = logging.getLogger(__name__)
def show_value(name, value):
# type: (str, Optional[str]) -> None
logger.info('%s: %s', name, value)
def show_sys_implementation():
# type: () -> None
logger.info('sys.implementation:')
if hasattr(sys, 'implementation'):
implementation = sys.implementation # type: ignore
implementation_name = implementation.name
else:
implementation_name = ''
with indent_log():
show_value('name', implementation_name)
def create_vendor_txt_map():
# type: () -> Dict[str, str]
vendor_txt_path = os.path.join(
os.path.dirname(pip_location),
'_vendor',
'vendor.txt'
)
with open(vendor_txt_path) as f:
# Purge non version specifying lines.
# Also, remove any space prefix or suffixes (including comments).
lines = [line.strip().split(' ', 1)[0]
for line in f.readlines() if '==' in line]
# Transform into "module" -> version dict.
return dict(line.split('==', 1) for line in lines) # type: ignore
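# Editor's note: an illustrative sketch (sample lines assumed) of the mapping
# built above. Given a vendor.txt containing
#
#   urllib3==1.25.9
#   requests==2.24.0  # see ...
#
# the function returns {'urllib3': '1.25.9', 'requests': '2.24.0'}, since
# everything after the first space (including trailing comments) is dropped.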
def get_module_from_module_name(module_name):
# type: (str) -> ModuleType
# Module name can be uppercase in vendor.txt for some reason...
module_name = module_name.lower()
# PATCH: setuptools is actually only pkg_resources.
if module_name == 'setuptools':
module_name = 'pkg_resources'
__import__(
'pip._vendor.{}'.format(module_name),
globals(),
locals(),
level=0
)
return getattr(pip._vendor, module_name)
def get_vendor_version_from_module(module_name):
# type: (str) -> Optional[str]
module = get_module_from_module_name(module_name)
version = getattr(module, '__version__', None)
if not version:
# Try to find version in debundled module info
# The type for module.__file__ is Optional[str] in
# Python 2, and str in Python 3. The type: ignore is
# added to account for Python 2, instead of a cast
# and should be removed once we drop Python 2 support
pkg_set = pkg_resources.WorkingSet(
[os.path.dirname(module.__file__)] # type: ignore
)
package = pkg_set.find(pkg_resources.Requirement.parse(module_name))
version = getattr(package, 'version', None)
return version
def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
"""Log the actual version and print extra info if there is
a conflict or if the actual version could not be imported.
"""
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message = ' (Unable to locate actual module version, using'\
' vendor.txt specified version)'
actual_version = expected_version
elif actual_version != expected_version:
extra_message = ' (CONFLICT: vendor.txt suggests version should'\
' be {})'.format(expected_version)
logger.info('%s==%s%s', module_name, actual_version, extra_message)
def show_vendor_versions():
# type: () -> None
logger.info('vendored library versions:')
vendor_txt_versions = create_vendor_txt_map()
with indent_log():
show_actual_vendor_versions(vendor_txt_versions)
def show_tags(options):
# type: (Values) -> None
tag_limit = 10
target_python = make_target_python(options)
tags = target_python.get_tags()
# Display the target options that were explicitly provided.
formatted_target = target_python.format_given()
suffix = ''
if formatted_target:
suffix = ' (target: {})'.format(formatted_target)
msg = 'Compatible tags: {}{}'.format(len(tags), suffix)
logger.info(msg)
if options.verbose < 1 and len(tags) > tag_limit:
tags_limited = True
tags = tags[:tag_limit]
else:
tags_limited = False
with indent_log():
for tag in tags:
logger.info(str(tag))
if tags_limited:
msg = (
'...\n'
'[First {tag_limit} tags shown. Pass --verbose to show all.]'
).format(tag_limit=tag_limit)
logger.info(msg)
def ca_bundle_info(config):
# type: (Configuration) -> str
levels = set()
for key, _ in config.items():
levels.add(key.split('.')[0])
if not levels:
return "Not specified"
levels_that_override_global = ['install', 'wheel', 'download']
global_overriding_level = [
level for level in levels if level in levels_that_override_global
]
if not global_overriding_level:
return 'global'
if 'global' in levels:
levels.remove('global')
return ", ".join(levels)
class DebugCommand(Command):
"""
Display debug information.
"""
usage = """
%prog <options>"""
ignore_require_venv = True
def add_options(self):
# type: () -> None
cmdoptions.add_target_python_options(self.cmd_opts)
self.parser.insert_option_group(0, self.cmd_opts)
self.parser.config.load()
def run(self, options, args):
# type: (Values, List[str]) -> int
logger.warning(
"This command is only meant for debugging. "
"Do not use this with automation for parsing and getting these "
"details, since the output and options of this command may "
"change without notice."
)
show_value('pip version', get_pip_version())
show_value('sys.version', sys.version)
show_value('sys.executable', sys.executable)
show_value('sys.getdefaultencoding', sys.getdefaultencoding())
show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())
show_value(
'locale.getpreferredencoding', locale.getpreferredencoding(),
)
show_value('sys.platform', sys.platform)
show_sys_implementation()
show_value("'cert' config value", ca_bundle_info(self.parser.config))
show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE'))
show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE'))
show_value("pip._vendor.certifi.where()", where())
show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED)
show_vendor_versions()
show_tags(options)
return SUCCESS
| Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/debug.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/debug.py",
"repo_id": "Django-locallibrary",
"token_count": 2924
} | 10 |
"""
For types associated with installation schemes.
For a general overview of available schemes and their context, see
https://docs.python.org/3/install/index.html#alternate-installation.
"""
SCHEME_KEYS = ['platlib', 'purelib', 'headers', 'scripts', 'data']
class Scheme(object):
"""A Scheme holds paths which are used as the base directories for
artifacts associated with a Python package.
"""
__slots__ = SCHEME_KEYS
def __init__(
self,
platlib, # type: str
purelib, # type: str
headers, # type: str
scripts, # type: str
data, # type: str
):
self.platlib = platlib
self.purelib = purelib
self.headers = headers
self.scripts = scripts
self.data = data
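# Editor's note: an illustrative (assumed) construction of a Scheme; the
# paths are hypothetical and would normally come from distutils/sysconfig:
#
#   scheme = Scheme(
#       platlib='/venv/lib/python3.8/site-packages',
#       purelib='/venv/lib/python3.8/site-packages',
#       headers='/venv/include/site/python3.8/pkg',
#       scripts='/venv/bin',
#       data='/venv',
#   )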
| Django-locallibrary/env/Lib/site-packages/pip/_internal/models/scheme.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/models/scheme.py",
"repo_id": "Django-locallibrary",
"token_count": 303
} | 11 |
"""Download files with progress indicators.
"""
import cgi
import logging
import mimetypes
import os
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE
from pip._internal.cli.progress_bars import DownloadProgressProvider
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.index import PyPI
from pip._internal.network.cache import is_from_cache
from pip._internal.network.utils import (
HEADERS,
raise_for_status,
response_chunks,
)
from pip._internal.utils.misc import (
format_size,
redact_auth_from_url,
splitext,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterable, Optional
from pip._vendor.requests.models import Response
from pip._internal.models.link import Link
from pip._internal.network.session import PipSession
logger = logging.getLogger(__name__)
def _get_http_response_size(resp):
# type: (Response) -> Optional[int]
try:
return int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
return None
def _prepare_download(
resp, # type: Response
link, # type: Link
progress_bar # type: str
):
# type: (...) -> Iterable[bytes]
total_length = _get_http_response_size(resp)
if link.netloc == PyPI.file_storage_domain:
url = link.show_url
else:
url = link.url_without_fragment
logged_url = redact_auth_from_url(url)
if total_length:
logged_url = '{} ({})'.format(logged_url, format_size(total_length))
if is_from_cache(resp):
logger.info("Using cached %s", logged_url)
else:
logger.info("Downloading %s", logged_url)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif is_from_cache(resp):
show_progress = False
elif not total_length:
show_progress = True
elif total_length > (40 * 1000):
show_progress = True
else:
show_progress = False
chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
if not show_progress:
return chunks
return DownloadProgressProvider(
progress_bar, max=total_length
)(chunks)
def sanitize_content_filename(filename):
# type: (str) -> str
"""
Sanitize the "filename" value from a Content-Disposition header.
"""
return os.path.basename(filename)
def parse_content_disposition(content_disposition, default_filename):
# type: (str, str) -> str
"""
Parse the "filename" value from a Content-Disposition header, and
return the default filename if the result is empty.
"""
_type, params = cgi.parse_header(content_disposition)
filename = params.get('filename')
if filename:
# We need to sanitize the filename to prevent directory traversal
# in case the filename contains ".." path parts.
filename = sanitize_content_filename(filename)
return filename or default_filename
def _get_http_response_filename(resp, link):
# type: (Response, Link) -> str
"""Get an ideal filename from the given HTTP response, falling back to
the link filename if not provided.
"""
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition, filename)
ext = splitext(filename)[1] # type: Optional[str]
if not ext:
ext = mimetypes.guess_extension(
resp.headers.get('content-type', '')
)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
return filename
def _http_get_download(session, link):
# type: (PipSession, Link) -> Response
target_url = link.url.split('#', 1)[0]
resp = session.get(target_url, headers=HEADERS, stream=True)
raise_for_status(resp)
return resp
class Download(object):
def __init__(
self,
response, # type: Response
filename, # type: str
chunks, # type: Iterable[bytes]
):
# type: (...) -> None
self.response = response
self.filename = filename
self.chunks = chunks
class Downloader(object):
def __init__(
self,
session, # type: PipSession
progress_bar, # type: str
):
# type: (...) -> None
self._session = session
self._progress_bar = progress_bar
def __call__(self, link):
# type: (Link) -> Download
try:
resp = _http_get_download(self._session, link)
except NetworkConnectionError as e:
assert e.response is not None
logger.critical(
"HTTP error %s while getting %s", e.response.status_code, link
)
raise
return Download(
resp,
_get_http_response_filename(resp, link),
_prepare_download(resp, link, self._progress_bar),
)
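# --- Hedged usage sketch (added for illustration; not part of pip itself) ---
# Assuming the caller already holds a configured PipSession and a Link to a
# remote archive, the Downloader defined above streams the file into
# `target_dir`. The helper name and `target_dir` argument are illustrative.
def _example_download_to(session, link, target_dir):
    # type: (PipSession, Link, str) -> str
    downloader = Downloader(session, progress_bar="on")
    download = downloader(link)
    filepath = os.path.join(target_dir, download.filename)
    with open(filepath, "wb") as content_file:
        for chunk in download.chunks:
            content_file.write(chunk)
    return filepath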
| Django-locallibrary/env/Lib/site-packages/pip/_internal/network/download.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/network/download.py",
"repo_id": "Django-locallibrary",
"token_count": 2019
} | 12 |
"""Metadata generation logic for source distributions.
"""
import os
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from pip._internal.build_env import BuildEnvironment
from pip._vendor.pep517.wrappers import Pep517HookCaller
def generate_metadata(build_env, backend):
# type: (BuildEnvironment, Pep517HookCaller) -> str
"""Generate metadata using mechanisms described in PEP 517.
Returns the generated metadata directory.
"""
metadata_tmpdir = TempDirectory(
kind="modern-metadata", globally_managed=True
)
metadata_dir = metadata_tmpdir.path
with build_env:
# Note that Pep517HookCaller implements a fallback for
# prepare_metadata_for_build_wheel, so we don't have to
# consider the possibility that this hook doesn't exist.
runner = runner_with_spinner_message("Preparing wheel metadata")
with backend.subprocess_runner(runner):
distinfo_dir = backend.prepare_metadata_for_build_wheel(
metadata_dir
)
return os.path.join(metadata_dir, distinfo_dir)
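# --- Hedged usage sketch (added for illustration; not part of pip itself) ---
# Both arguments are assumed to be prepared elsewhere: a BuildEnvironment with
# the backend's build requirements installed, and a Pep517HookCaller for the
# project's backend. The METADATA file inside the returned directory holds
# the core metadata.
def _example_read_core_metadata(build_env, backend):
    # type: (BuildEnvironment, Pep517HookCaller) -> str
    distinfo_dir = generate_metadata(build_env, backend)
    with open(os.path.join(distinfo_dir, "METADATA")) as metadata_file:
        return metadata_file.read()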
| Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/build/metadata.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/build/metadata.py",
"repo_id": "Django-locallibrary",
"token_count": 435
} | 13 |
from __future__ import absolute_import
import collections
import logging
from pip._internal.utils.logging import indent_log
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .req_file import parse_requirements
from .req_install import InstallRequirement
from .req_set import RequirementSet
if MYPY_CHECK_RUNNING:
from typing import Iterator, List, Optional, Sequence, Tuple
__all__ = [
"RequirementSet", "InstallRequirement",
"parse_requirements", "install_given_reqs",
]
logger = logging.getLogger(__name__)
class InstallationResult(object):
def __init__(self, name):
# type: (str) -> None
self.name = name
def __repr__(self):
# type: () -> str
return "InstallationResult(name={!r})".format(self.name)
def _validate_requirements(
requirements, # type: List[InstallRequirement]
):
# type: (...) -> Iterator[Tuple[str, InstallRequirement]]
for req in requirements:
assert req.name, "invalid to-be-installed requirement: {}".format(req)
yield req.name, req
def install_given_reqs(
requirements, # type: List[InstallRequirement]
install_options, # type: List[str]
global_options, # type: Sequence[str]
root, # type: Optional[str]
home, # type: Optional[str]
prefix, # type: Optional[str]
warn_script_location, # type: bool
use_user_site, # type: bool
pycompile, # type: bool
):
# type: (...) -> List[InstallationResult]
"""
Install everything in the given list.
(to be called after having downloaded and unpacked the packages)
"""
to_install = collections.OrderedDict(_validate_requirements(requirements))
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join(to_install.keys()),
)
installed = []
with indent_log():
for req_name, requirement in to_install.items():
if requirement.should_reinstall:
logger.info('Attempting uninstall: %s', req_name)
with indent_log():
uninstalled_pathset = requirement.uninstall(
auto_confirm=True
)
else:
uninstalled_pathset = None
try:
requirement.install(
install_options,
global_options,
root=root,
home=home,
prefix=prefix,
warn_script_location=warn_script_location,
use_user_site=use_user_site,
pycompile=pycompile,
)
except Exception:
# if install did not succeed, rollback previous uninstall
if uninstalled_pathset and not requirement.install_succeeded:
uninstalled_pathset.rollback()
raise
else:
if uninstalled_pathset and requirement.install_succeeded:
uninstalled_pathset.commit()
installed.append(InstallationResult(req_name))
return installed
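# --- Hedged usage sketch (added for illustration; not part of pip itself) ---
# A minimal call with already-prepared InstallRequirement objects; every
# option below is a placeholder for what the install command normally passes.
def _example_install_all(requirements):
    # type: (List[InstallRequirement]) -> None
    results = install_given_reqs(
        requirements,
        install_options=[],
        global_options=[],
        root=None,
        home=None,
        prefix=None,
        warn_script_location=True,
        use_user_site=False,
        pycompile=True,
    )
    for result in results:
        logger.info('Installed %s', result.name)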
| Django-locallibrary/env/Lib/site-packages/pip/_internal/req/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/req/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 1384
} | 14 |
import collections
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.models.wheel import Wheel
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
dist_in_site_packages,
dist_in_usersite,
get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
from .candidates import (
AlreadyInstalledCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
)
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
)
if MYPY_CHECK_RUNNING:
from typing import (
FrozenSet,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
)
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.pkg_resources import Distribution
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.resolution.base import InstallRequirementProvider
from .base import Candidate, Requirement
from .candidates import BaseCandidate
C = TypeVar("C")
Cache = Dict[Link, C]
VersionCandidates = Dict[_BaseVersion, Candidate]
logger = logging.getLogger(__name__)
class Factory(object):
def __init__(
self,
finder, # type: PackageFinder
preparer, # type: RequirementPreparer
make_install_req, # type: InstallRequirementProvider
wheel_cache, # type: Optional[WheelCache]
use_user_site, # type: bool
force_reinstall, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
py_version_info=None, # type: Optional[Tuple[int, ...]]
lazy_wheel=False, # type: bool
):
# type: (...) -> None
self._finder = finder
self.preparer = preparer
self._wheel_cache = wheel_cache
self._python_candidate = RequiresPythonCandidate(py_version_info)
self._make_install_req_from_spec = make_install_req
self._use_user_site = use_user_site
self._force_reinstall = force_reinstall
self._ignore_requires_python = ignore_requires_python
self.use_lazy_wheel = lazy_wheel
self._link_candidate_cache = {} # type: Cache[LinkCandidate]
self._editable_candidate_cache = {} # type: Cache[EditableCandidate]
if not ignore_installed:
self._installed_dists = {
canonicalize_name(dist.project_name): dist
for dist in get_installed_distributions()
}
else:
self._installed_dists = {}
@property
def force_reinstall(self):
# type: () -> bool
return self._force_reinstall
def _make_candidate_from_dist(
self,
dist, # type: Distribution
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
):
# type: (...) -> Candidate
base = AlreadyInstalledCandidate(dist, template, factory=self)
if extras:
return ExtrasCandidate(base, extras)
return base
def _make_candidate_from_link(
self,
link, # type: Link
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
name, # type: Optional[str]
version, # type: Optional[_BaseVersion]
):
# type: (...) -> Candidate
# TODO: Check already installed candidate, and use it if the link and
# editable flag match.
if template.editable:
if link not in self._editable_candidate_cache:
self._editable_candidate_cache[link] = EditableCandidate(
link, template, factory=self, name=name, version=version,
)
base = self._editable_candidate_cache[link] # type: BaseCandidate
else:
if link not in self._link_candidate_cache:
self._link_candidate_cache[link] = LinkCandidate(
link, template, factory=self, name=name, version=version,
)
base = self._link_candidate_cache[link]
if extras:
return ExtrasCandidate(base, extras)
return base
def _iter_found_candidates(
self,
ireqs, # type: Sequence[InstallRequirement]
specifier, # type: SpecifierSet
):
# type: (...) -> Iterable[Candidate]
if not ireqs:
return ()
# The InstallRequirement implementation requires us to give it a
# "template". Here we just choose the first requirement to represent
# all of them.
# Hopefully the Project model can correct this mismatch in the future.
template = ireqs[0]
name = canonicalize_name(template.req.name)
hashes = Hashes()
extras = frozenset() # type: FrozenSet[str]
for ireq in ireqs:
specifier &= ireq.req.specifier
hashes |= ireq.hashes(trust_internet=False)
extras |= frozenset(ireq.extras)
# We use this to ensure that we only yield a single candidate for
# each version (the finder's preferred one for that version). The
# requirement needs to return only one candidate per version, so we
# implement that logic here so that requirements using this helper
# don't all have to do the same thing later.
candidates = collections.OrderedDict() # type: VersionCandidates
# Get the installed version, if it matches, unless the user
# specified `--force-reinstall`, when we want the version from
# the index instead.
installed_version = None
installed_candidate = None
if not self._force_reinstall and name in self._installed_dists:
installed_dist = self._installed_dists[name]
installed_version = installed_dist.parsed_version
if specifier.contains(installed_version, prereleases=True):
installed_candidate = self._make_candidate_from_dist(
dist=installed_dist,
extras=extras,
template=template,
)
found = self._finder.find_best_candidate(
project_name=name,
specifier=specifier,
hashes=hashes,
)
for ican in found.iter_applicable():
if ican.version == installed_version and installed_candidate:
candidate = installed_candidate
else:
candidate = self._make_candidate_from_link(
link=ican.link,
extras=extras,
template=template,
name=name,
version=ican.version,
)
candidates[ican.version] = candidate
# Yield the installed version even if it is not found on the index.
if installed_version and installed_candidate:
candidates[installed_version] = installed_candidate
return six.itervalues(candidates)
def find_candidates(self, requirements, constraint):
# type: (Sequence[Requirement], SpecifierSet) -> Iterable[Candidate]
explicit_candidates = set() # type: Set[Candidate]
ireqs = [] # type: List[InstallRequirement]
for req in requirements:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(ireqs, constraint)
if constraint:
name = explicit_candidates.pop().name
raise InstallationError(
"Could not satisfy constraints for {!r}: installation from "
"path or url cannot be constrained to a version".format(name)
)
return (
c for c in explicit_candidates
if all(req.is_satisfied_by(c) for req in requirements)
)
def make_requirement_from_install_req(self, ireq, requested_extras):
# type: (InstallRequirement, Iterable[str]) -> Optional[Requirement]
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name, ireq.markers,
)
return None
if not ireq.link:
return SpecifierRequirement(ireq)
if ireq.link.is_wheel:
wheel = Wheel(ireq.link.filename)
if not wheel.supported(self._finder.target_python.get_tags()):
msg = "{} is not a supported wheel on this platform.".format(
wheel.filename,
)
raise UnsupportedWheel(msg)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
return self.make_requirement_from_candidate(cand)
def make_requirement_from_candidate(self, candidate):
# type: (Candidate) -> ExplicitRequirement
return ExplicitRequirement(candidate)
def make_requirement_from_spec(
self,
specifier, # type: str
comes_from, # type: InstallRequirement
requested_extras=(), # type: Iterable[str]
):
# type: (...) -> Optional[Requirement]
ireq = self._make_install_req_from_spec(specifier, comes_from)
return self.make_requirement_from_install_req(ireq, requested_extras)
def make_requires_python_requirement(self, specifier):
# type: (Optional[SpecifierSet]) -> Optional[Requirement]
if self._ignore_requires_python or specifier is None:
return None
return RequiresPythonRequirement(specifier, self._python_candidate)
def get_wheel_cache_entry(self, link, name):
# type: (Link, Optional[str]) -> Optional[CacheEntry]
"""Look up the link in the wheel cache.
If ``preparer.require_hashes`` is True, don't use the wheel cache,
because cached wheels, always built locally, have different hashes
than the files downloaded from the index server and thus throw false
hash mismatches. Furthermore, cached wheels at present have
nondeterministic contents due to file modification times.
"""
if self._wheel_cache is None or self.preparer.require_hashes:
return None
return self._wheel_cache.get_cache_entry(
link=link,
package_name=name,
supported_tags=get_supported(),
)
def get_dist_to_uninstall(self, candidate):
# type: (Candidate) -> Optional[Distribution]
# TODO: Are there more cases this needs to return True? Editable?
dist = self._installed_dists.get(candidate.name)
if dist is None: # Not installed, no uninstallation required.
return None
# We're installing into global site. The current installation must
# be uninstalled, no matter it's in global or user site, because the
# user site installation has precedence over global.
if not self._use_user_site:
return dist
# We're installing into user site. Remove the user site installation.
if dist_in_usersite(dist):
return dist
# We're installing into user site, but the installed incompatible
# package is in global site. We can't uninstall that, and would let
        # the new user installation "shadow" it. But shadowing won't work
# in virtual environments, so we error out.
if running_under_virtualenv() and dist_in_site_packages(dist):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to {} in {}".format(
dist.project_name, dist.location,
)
)
return None
def _report_requires_python_error(
self,
requirement, # type: RequiresPythonRequirement
template, # type: Candidate
):
# type: (...) -> UnsupportedPythonVersion
message_format = (
"Package {package!r} requires a different Python: "
"{version} not in {specifier!r}"
)
message = message_format.format(
package=template.name,
version=self._python_candidate.version,
specifier=str(requirement.specifier),
)
return UnsupportedPythonVersion(message)
def get_installation_error(self, e):
# type: (ResolutionImpossible) -> InstallationError
assert e.causes, "Installation error reported with no cause"
# If one of the things we can't solve is "we need Python X.Y",
# that is what we report.
for cause in e.causes:
if isinstance(cause.requirement, RequiresPythonRequirement):
return self._report_requires_python_error(
cause.requirement,
cause.parent,
)
# Otherwise, we have a set of causes which can't all be satisfied
# at once.
# The simplest case is when we have *one* cause that can't be
# satisfied. We just report that case.
if len(e.causes) == 1:
req, parent = e.causes[0]
if parent is None:
req_disp = str(req)
else:
req_disp = '{} (from {})'.format(req, parent.name)
logger.critical(
"Could not find a version that satisfies the requirement %s",
req_disp,
)
return DistributionNotFound(
'No matching distribution found for {}'.format(req)
)
# OK, we now have a list of requirements that can't all be
# satisfied at once.
# A couple of formatting helpers
def text_join(parts):
# type: (List[str]) -> str
if len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def readable_form(cand):
# type: (Candidate) -> str
return "{} {}".format(cand.name, cand.version)
def describe_trigger(parent):
# type: (Candidate) -> str
ireq = parent.get_install_requirement()
if not ireq or not ireq.comes_from:
return "{} {}".format(parent.name, parent.version)
if isinstance(ireq.comes_from, InstallRequirement):
return str(ireq.comes_from.name)
return str(ireq.comes_from)
triggers = []
for req, parent in e.causes:
if parent is None:
# This is a root requirement, so we can report it directly
trigger = req.format_for_error()
else:
trigger = describe_trigger(parent)
triggers.append(trigger)
if triggers:
info = text_join(triggers)
else:
info = "the requested packages"
msg = "Cannot install {} because these package versions " \
"have conflicting dependencies.".format(info)
logger.critical(msg)
msg = "\nThe conflict is caused by:"
for req, parent in e.causes:
msg = msg + "\n "
if parent:
msg = msg + "{} {} depends on ".format(
parent.name,
parent.version
)
else:
msg = msg + "The user requested "
msg = msg + req.format_for_error()
msg = msg + "\n\n" + \
"To fix this you could try to:\n" + \
"1. loosen the range of package versions you've specified\n" + \
"2. remove package versions to allow pip attempt to solve " + \
"the dependency conflict\n"
logger.info(msg)
return DistributionNotFound(
"ResolutionImpossible: for help visit "
"https://pip.pypa.io/en/latest/user_guide/"
"#fixing-conflicting-dependencies"
)
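# --- Hedged usage sketch (added for illustration; not part of pip itself) ---
# A Factory is normally constructed by the resolver with a finder, preparer
# and related machinery; assuming such an instance exists, turning a plain
# requirement string into a Requirement object looks roughly like this. The
# specifier string and `root_ireq` are placeholders.
def _example_requirement_from_spec(factory, root_ireq):
    # type: (Factory, InstallRequirement) -> Optional[Requirement]
    return factory.make_requirement_from_spec(
        "requests>=2.0", comes_from=root_ireq,
    )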
| Django-locallibrary/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py",
"repo_id": "Django-locallibrary",
"token_count": 7412
} | 15 |
"""A helper module that injects SecureTransport, on import.
The import should be done as early as possible, to ensure all requests and
sessions (or whatever) are created after injecting SecureTransport.
Note that we only do the injection on macOS, when the linked OpenSSL is too
old to handle TLSv1.2.
"""
import sys
def inject_securetransport():
# type: () -> None
# Only relevant on macOS
if sys.platform != "darwin":
return
try:
import ssl
except ImportError:
return
# Checks for OpenSSL 1.0.1
if ssl.OPENSSL_VERSION_NUMBER >= 0x1000100f:
return
try:
from pip._vendor.urllib3.contrib import securetransport
except (ImportError, OSError):
return
securetransport.inject_into_urllib3()
inject_securetransport()
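# --- Hedged usage note (added for illustration; not part of pip itself) ---
# The module is imported purely for the side effect of the call above; pip
# performs that import very early so that every later HTTPS session picks up
# the patched transport, e.g.:
#
#   import pip._internal.utils.inject_securetransport  # noqa: F401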
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py",
"repo_id": "Django-locallibrary",
"token_count": 293
} | 16 |
from .core import contents, where
__version__ = "2020.06.20"
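# --- Hedged usage sketch (added for illustration; not part of certifi) ---
# where() returns the filesystem path of the bundled CA certificate file and
# contents() returns its PEM text; both come from .core, imported above.
def _example_bundle_summary():
    # type: () -> str
    pem_lines = contents().splitlines()
    return "{} ({} lines)".format(where(), len(pem_lines))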
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/certifi/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/certifi/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 21
} | 17 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import BIG5_SM_MODEL
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
super(Big5Prober, self).__init__()
self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
self.distribution_analyzer = Big5DistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "Big5"
@property
def language(self):
return "Chinese"
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py",
"repo_id": "Django-locallibrary",
"token_count": 511
} | 18 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
class MultiByteCharSetProber(CharSetProber):
"""
MultiByteCharSetProber
"""
def __init__(self, lang_filter=None):
super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
self.distribution_analyzer = None
self.coding_sm = None
self._last_char = [0, 0]
def reset(self):
super(MultiByteCharSetProber, self).reset()
if self.coding_sm:
self.coding_sm.reset()
if self.distribution_analyzer:
self.distribution_analyzer.reset()
self._last_char = [0, 0]
@property
def charset_name(self):
raise NotImplementedError
@property
def language(self):
raise NotImplementedError
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.distribution_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
return self.distribution_analyzer.get_confidence()
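# --- Hedged usage sketch (added for illustration; not part of chardet) ---
# Concrete subclasses (Big5Prober, for example) plug in a coding state machine
# and a distribution analyser; the feed()/state protocol can then be driven
# chunk by chunk until the prober reaches a decision.
def _example_drive_prober(prober, chunks):
    for chunk in chunks:
        if prober.feed(chunk) != ProbingState.DETECTING:
            break
    return prober.charset_name, prober.get_confidence()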
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py",
"repo_id": "Django-locallibrary",
"token_count": 1400
} | 19 |
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
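# --- Hedged usage sketch (added for illustration; not part of colorama) ---
# The singletons above hold ready-made escape sequences, so colouring text is
# plain string concatenation (colorama.init() is normally called first on
# Windows so the sequences are translated into console API calls).
def _example_success_banner(text):
    return Fore.GREEN + Style.BRIGHT + text + Style.RESET_ALL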
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/colorama/ansi.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/colorama/ansi.py",
"repo_id": "Django-locallibrary",
"token_count": 1256
} | 20 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME)
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME,
WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
                         any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        Yields every matching distribution found on the path; nothing is
        yielded when no installed distribution provides *name*.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist)
else:
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
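# --- Hedged usage sketch (added for illustration; not part of distlib) ---
# DistributionPath scans sys.path (or an explicit list of directories) for
# installed distributions; the helper below uses only the API defined above.
def _example_iter_installed():
    dist_path = DistributionPath(include_egg=True)
    for dist in dist_path.get_distributions():
        logger.debug('found %s', dist.name_and_version)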
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.modules = []
self.finder = finder = resources.finder_for_path(path)
if finder is None:
raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find(LEGACY_METADATA_FILENAME)
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
r = finder.find('REQUESTED')
self.requested = r is not None
p = os.path.join(path, 'top_level.txt')
if os.path.exists(p):
with open(p, 'rb') as f:
data = f.read().decode('utf-8')
self.modules = data.splitlines()
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
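# --- Hedged usage sketch (added for illustration; not part of distlib) ---
# Given the path of an existing .dist-info directory, InstalledDistribution
# exposes the RECORD contents; `distinfo_path` is a placeholder argument.
def _example_verify_install(distinfo_path):
    dist = InstalledDistribution(distinfo_path)
    for path, checksum, size in dist.list_installed_files():
        logger.debug('%s hash=%s size=%s', path, checksum, size)
    return dist.check_installed_files()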
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
tl_path = tl_data = None
if path.endswith('.egg'):
if os.path.isdir(path):
p = os.path.join(path, 'EGG-INFO')
meta_path = os.path.join(p, 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(p, 'requires.txt')
tl_path = os.path.join(p, 'top_level.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
tl_path = os.path.join(path, 'top_level.txt')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
# look for top-level modules in top_level.txt, if present
if tl_data is None:
if tl_path is not None and os.path.exists(tl_path):
with open(tl_path, 'rb') as f:
tl_data = f.read().decode('utf-8')
if not tl_data:
tl_data = []
else:
tl_data = tl_data.splitlines()
self.modules = tl_data
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
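# --- usage sketch (not part of distlib) -------------------------------------
# A minimal, hedged illustration of the helpers defined above.  The package
# names, versions and the ">=0.5" constraint are invented for the example, and
# the helper name below is not part of the library.
def _dependency_graph_sketch():
    alpha = make_dist('alpha', '1.0')
    beta = make_dist('beta', '0.6')
    graph = DependencyGraph()
    graph.add_distribution(alpha)
    graph.add_distribution(beta)
    graph.add_edge(alpha, beta, '>=0.5')     # alpha depends on beta
    ordered, cyclic = graph.topological_sort()
    # beta has no outgoing edges, so it is emitted first; there is no cycle.
    assert ordered == [beta, alpha] and cyclic == []
    # repr_node renders "alpha 1.0" with "beta 0.6 [>=0.5]" indented under it.
    return graph.repr_node(alpha)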
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/database.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/database.py",
"repo_id": "Django-locallibrary",
"token_count": 23863
} | 21 |
"""
HTML parsing library based on the `WHATWG HTML specification
<https://whatwg.org/html>`_. The parser is designed to be compatible with
existing HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage::
from pip._vendor import html5lib
with open("my_document.html", "rb") as f:
tree = html5lib.parse(f)
For convenience, this module re-exports the following names:
* :func:`~.html5parser.parse`
* :func:`~.html5parser.parseFragment`
* :class:`~.html5parser.HTMLParser`
* :func:`~.treebuilders.getTreeBuilder`
* :func:`~.treewalkers.getTreeWalker`
* :func:`~.serializer.serialize`
"""
from __future__ import absolute_import, division, unicode_literals
from .html5parser import HTMLParser, parse, parseFragment
from .treebuilders import getTreeBuilder
from .treewalkers import getTreeWalker
from .serializer import serialize
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
"getTreeWalker", "serialize"]
# this has to be at the top level, see how setup.py parses this
#: Distribution version number.
__version__ = "1.1"
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 368
} | 22 |
from __future__ import absolute_import, division, unicode_literals
try:
from collections.abc import Mapping
except ImportError: # Python 2.7
from collections import Mapping
class Trie(Mapping):
"""Abstract base class for tries"""
def keys(self, prefix=None):
# pylint:disable=arguments-differ
keys = super(Trie, self).keys()
if prefix is None:
return set(keys)
return {x for x in keys if x.startswith(prefix)}
def has_keys_with_prefix(self, prefix):
for key in self.keys():
if key.startswith(prefix):
return True
return False
def longest_prefix(self, prefix):
if prefix in self:
return prefix
for i in range(1, len(prefix) + 1):
if prefix[:-i] in self:
return prefix[:-i]
raise KeyError(prefix)
def longest_prefix_item(self, prefix):
lprefix = self.longest_prefix(prefix)
return (lprefix, self[lprefix])
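# --- usage sketch (not part of html5lib) -------------------------------------
# Hedged illustration of the abstract interface above.  DictTrie is a made-up
# minimal subclass backed by a plain dict, used only to show the prefix
# semantics; the helper name is not part of the library.
def _trie_sketch():
    class DictTrie(Trie):
        def __init__(self, data):
            self._data = dict(data)
        def __getitem__(self, key):
            return self._data[key]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)
    t = DictTrie({"foo": 1, "foobar": 2})
    assert t.keys(prefix="foo") == {"foo", "foobar"}
    assert t.has_keys_with_prefix("fo")
    assert t.longest_prefix("foobarbaz") == "foobar"
    assert t.longest_prefix_item("foobarbaz") == ("foobar", 2)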
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py",
"repo_id": "Django-locallibrary",
"token_count": 431
} | 23 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from . import base
from ..constants import namespaces, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class Filter(base.Filter):
"""Lints the token stream for errors
If it finds any errors, it'll raise an ``AssertionError``.
"""
def __init__(self, source, require_matching_tags=True):
"""Creates a Filter
:arg source: the source token stream
:arg require_matching_tags: whether or not to require matching tags
"""
super(Filter, self).__init__(source)
self.require_matching_tags = require_matching_tags
def __iter__(self):
open_elements = []
for token in base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(token["data"], dict)
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert type == "EmptyTag"
else:
assert type == "StartTag"
if type == "StartTag" and self.require_matching_tags:
open_elements.append((namespace, name))
for (namespace, name), value in token["data"].items():
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(value, text_type)
elif type == "EndTag":
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
elif self.require_matching_tags:
start = open_elements.pop()
assert start == (namespace, name)
elif type == "Comment":
data = token["data"]
assert isinstance(data, text_type)
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
assert isinstance(data, text_type)
assert data != ""
if type == "SpaceCharacters":
assert data.strip(spaceCharacters) == ""
elif type == "Doctype":
name = token["name"]
assert name is None or isinstance(name, text_type)
assert token["publicId"] is None or isinstance(name, text_type)
assert token["systemId"] is None or isinstance(name, text_type)
elif type == "Entity":
assert isinstance(token["name"], text_type)
elif type == "SerializerError":
assert isinstance(token["data"], text_type)
else:
assert False, "Unknown token type: %(type)s" % {"type": type}
yield token
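# --- usage sketch (not part of html5lib) -------------------------------------
# Hedged illustration: run the lint filter over the token stream produced by a
# tree walker.  The markup is made up; a malformed stream would raise
# AssertionError, as described in the class docstring.
def _lint_sketch():
    from pip._vendor import html5lib
    doc = html5lib.parse("<!DOCTYPE html><p>hi</p>")
    walker = html5lib.getTreeWalker("etree")
    return list(Filter(walker(doc)))  # linted tokens pass through unchanged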
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py",
"repo_id": "Django-locallibrary",
"token_count": 1687
} | 24 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from collections import OrderedDict
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import base
from .. import _ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("ascii", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
try:
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
except AttributeError:
pass
try:
node = et.getroot()
except AttributeError:
node = et
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(base.NonRecursiveTreeWalker):
def __init__(self, tree):
# pylint:disable=redefined-variable-type
if isinstance(tree, list):
self.fragmentChildren = set(tree)
tree = FragmentRoot(tree)
else:
self.fragmentChildren = set()
tree = Root(tree)
base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = _ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (base.DOCUMENT,)
elif isinstance(node, Doctype):
return base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return base.TEXT, ensure_str(node.obj)
elif node.tag == etree.Comment:
return base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
elif node in self.fragmentChildren:
return None
return node.getparent()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py",
"repo_id": "Django-locallibrary",
"token_count": 3093
} | 25 |
__version__ = '2.10'
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/package_data.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/package_data.py",
"repo_id": "Django-locallibrary",
"token_count": 10
} | 26 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import
import distutils.util
try:
from importlib.machinery import EXTENSION_SUFFIXES
except ImportError: # pragma: no cover
import imp
EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
del imp
import logging
import os
import platform
import re
import struct
import sys
import sysconfig
import warnings
from ._typing import TYPE_CHECKING, cast
if TYPE_CHECKING: # pragma: no cover
from typing import (
Dict,
FrozenSet,
IO,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
GlibcVersion = Tuple[int, int]
logger = logging.getLogger(__name__)
INTERPRETER_SHORT_NAMES = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
} # type: Dict[str, str]
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
class Tag(object):
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform"]
def __init__(self, interpreter, abi, platform):
# type: (str, str, str) -> None
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
@property
def interpreter(self):
# type: () -> str
return self._interpreter
@property
def abi(self):
# type: () -> str
return self._abi
@property
def platform(self):
# type: () -> str
return self._platform
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, Tag):
return NotImplemented
return (
(self.platform == other.platform)
and (self.abi == other.abi)
and (self.interpreter == other.interpreter)
)
def __hash__(self):
# type: () -> int
return hash((self._interpreter, self._abi, self._platform))
def __str__(self):
# type: () -> str
return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
def __repr__(self):
# type: () -> str
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag):
# type: (str) -> FrozenSet[Tag]
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
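# --- usage sketch (not part of packaging) ------------------------------------
# Hedged illustration of the compressed tag set handling described above; the
# helper name is not part of the library.
def _parse_tag_sketch():
    # type: () -> None
    tags = parse_tag("py2.py3-none-any")
    assert Tag("py2", "none", "any") in tags
    assert Tag("py3", "none", "any") in tags
    assert len(tags) == 2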
def _warn_keyword_parameter(func_name, kwargs):
# type: (str, Dict[str, bool]) -> bool
"""
Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
"""
if not kwargs:
return False
elif len(kwargs) > 1 or "warn" not in kwargs:
kwargs.pop("warn", None)
arg = next(iter(kwargs.keys()))
raise TypeError(
"{}() got an unexpected keyword argument {!r}".format(func_name, arg)
)
return kwargs["warn"]
def _get_config_var(name, warn=False):
# type: (str, bool) -> Union[int, str, None]
value = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string):
# type: (str) -> str
return string.replace(".", "_").replace("-", "_")
def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version, warn=False):
# type: (PythonVersion, bool) -> List[str]
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append("cp{version}".format(version=version))
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version=None, # type: Optional[PythonVersion]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
warn = _warn_keyword_parameter("cpython_tags", kwargs)
if not python_version:
python_version = sys.version_info[:2]
interpreter = "cp{}".format(_version_nodot(python_version[:2]))
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or _platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
yield tag
for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
yield tag
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
def _generic_abi():
# type: () -> Iterator[str]
abi = sysconfig.get_config_var("SOABI")
if abi:
yield _normalize_string(abi)
def generic_tags(
interpreter=None, # type: Optional[str]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
warn = _warn_keyword_parameter("generic_tags", kwargs)
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
platforms = list(platforms or _platform_tags())
abis = list(abis)
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version):
# type: (PythonVersion) -> Iterator[str]
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield "py{version}".format(version=_version_nodot(py_version[:2]))
yield "py{major}".format(major=py_version[0])
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
def compatible_tags(
python_version=None, # type: Optional[PythonVersion]
interpreter=None, # type: Optional[str]
platforms=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Iterator[Tag]
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or _platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
# type: (str, bool) -> str
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version, cpu_arch):
# type: (MacVersion, str) -> List[str]
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
formats.append("universal")
return formats
def mac_platforms(version=None, arch=None):
# type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver() # type: ignore
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
else:
version = version
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
for minor_version in range(version[1], -1, -1):
compat_version = version[0], minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
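# --- usage sketch (not part of packaging) ------------------------------------
# Hedged illustration with an explicit version/arch so the output does not
# depend on the machine running it; the helper name is not part of the library.
def _mac_platforms_sketch():
    # type: () -> None
    plats = list(mac_platforms(version=(10, 15), arch="x86_64"))
    assert plats[0] == "macosx_10_15_x86_64"
    assert "macosx_10_6_intel" in plats  # older compatibility tags are included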
# From PEP 513.
def _is_manylinux_compatible(name, glibc_version):
# type: (str, GlibcVersion) -> bool
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
return bool(getattr(_manylinux, name + "_compatible"))
except (ImportError, AttributeError):
# Fall through to heuristic check below.
pass
return _have_compatible_glibc(*glibc_version)
def _glibc_version_string():
# type: () -> Optional[str]
# Returns glibc version string, or None if not using glibc.
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _glibc_version_string_confstr():
# type: () -> Optional[str]
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
"CS_GNU_LIBC_VERSION"
)
assert version_string is not None
_, version = version_string.split() # type: Tuple[str, str]
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes():
# type: () -> Optional[str]
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# Note: typeshed is wrong here so we are ignoring this line.
process_namespace = ctypes.CDLL(None) # type: ignore
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version() # type: str
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# Separated out from have_compatible_glibc for easier unit testing.
def _check_glibc_version(version_str, required_major, minimum_minor):
# type: (str, int, int) -> bool
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str,
RuntimeWarning,
)
return False
return (
int(m.group("major")) == required_major
and int(m.group("minor")) >= minimum_minor
)
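# --- usage sketch (not part of packaging) ------------------------------------
# Hedged illustration of the version-string parsing above, including the
# patched/forked style mentioned in the gh-3588 comment; the helper name is
# not part of the library.
def _glibc_check_sketch():
    # type: () -> None
    assert _check_glibc_version("2.17", 2, 17)
    assert _check_glibc_version("2.20-2014.11", 2, 17)  # trailing junk is ignored
    assert not _check_glibc_version("2.12", 2, 17)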
def _have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
version_str = _glibc_version_string()
if version_str is None:
return False
return _check_glibc_version(version_str, required_major, minimum_minor)
# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader(object):
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
class _InvalidELFFileHeader(ValueError):
"""
An invalid ELF file header was found.
"""
ELF_MAGIC_NUMBER = 0x7F454C46
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFDATA2LSB = 1
ELFDATA2MSB = 2
EM_386 = 3
EM_S390 = 22
EM_ARM = 40
EM_X86_64 = 62
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
def __init__(self, file):
# type: (IO[bytes]) -> None
def unpack(fmt):
# type: (str) -> int
try:
(result,) = struct.unpack(
fmt, file.read(struct.calcsize(fmt))
) # type: (int, )
except struct.error:
raise _ELFFileHeader._InvalidELFFileHeader()
return result
self.e_ident_magic = unpack(">I")
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_class = unpack("B")
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_data = unpack("B")
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_version = unpack("B")
self.e_ident_osabi = unpack("B")
self.e_ident_abiversion = unpack("B")
self.e_ident_pad = file.read(7)
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
self.e_type = unpack(format_h)
self.e_machine = unpack(format_h)
self.e_version = unpack(format_i)
self.e_entry = unpack(format_p)
self.e_phoff = unpack(format_p)
self.e_shoff = unpack(format_p)
self.e_flags = unpack(format_i)
self.e_ehsize = unpack(format_h)
self.e_phentsize = unpack(format_h)
self.e_phnum = unpack(format_h)
self.e_shentsize = unpack(format_h)
self.e_shnum = unpack(format_h)
self.e_shstrndx = unpack(format_h)
def _get_elf_header():
# type: () -> Optional[_ELFFileHeader]
try:
with open(sys.executable, "rb") as f:
elf_header = _ELFFileHeader(f)
except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
return None
return elf_header
def _is_linux_armhf():
# type: () -> bool
# hard-float ABI can be detected from the ELF header of the running
# process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_ARM
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABIMASK
) == elf_header.EF_ARM_ABI_VER5
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
) == elf_header.EF_ARM_ABI_FLOAT_HARD
return result
def _is_linux_i686():
# type: () -> bool
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_386
return result
def _have_compatible_manylinux_abi(arch):
# type: (str) -> bool
if arch == "armv7l":
return _is_linux_armhf()
if arch == "i686":
return _is_linux_i686()
return True
def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
# type: (bool) -> Iterator[str]
linux = _normalize_string(distutils.util.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
manylinux_support = []
_, arch = linux.split("_", 1)
if _have_compatible_manylinux_abi(arch):
if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
manylinux_support.append(
("manylinux2014", (2, 17))
) # CentOS 7 w/ glibc 2.17 (PEP 599)
if arch in {"x86_64", "i686"}:
manylinux_support.append(
("manylinux2010", (2, 12))
) # CentOS 6 w/ glibc 2.12 (PEP 571)
manylinux_support.append(
("manylinux1", (2, 5))
) # CentOS 5 w/ glibc 2.5 (PEP 513)
manylinux_support_iter = iter(manylinux_support)
for name, glibc_version in manylinux_support_iter:
if _is_manylinux_compatible(name, glibc_version):
yield linux.replace("linux", name)
break
# Support for a later manylinux implies support for an earlier version.
for name, _ in manylinux_support_iter:
yield linux.replace("linux", name)
yield linux
def _generic_platforms():
# type: () -> Iterator[str]
yield _normalize_string(distutils.util.get_platform())
def _platform_tags():
# type: () -> Iterator[str]
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name():
# type: () -> str
"""
Returns the name of the running interpreter.
"""
try:
name = sys.implementation.name # type: ignore
except AttributeError: # pragma: no cover
# Python 2.7 compatibility.
name = platform.python_implementation().lower()
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(**kwargs):
# type: (bool) -> str
"""
Returns the version of the running interpreter.
"""
warn = _warn_keyword_parameter("interpreter_version", kwargs)
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version):
# type: (PythonVersion) -> str
if any(v >= 10 for v in version):
sep = "_"
else:
sep = ""
return sep.join(map(str, version))
def sys_tags(**kwargs):
# type: (bool) -> Iterator[Tag]
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
warn = _warn_keyword_parameter("sys_tags", kwargs)
interp_name = interpreter_name()
if interp_name == "cp":
for tag in cpython_tags(warn=warn):
yield tag
else:
for tag in generic_tags():
yield tag
for tag in compatible_tags():
yield tag
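# --- usage sketch (not part of packaging) ------------------------------------
# Hedged illustration: the first tag yielded by sys_tags() is the highest
# priority tag for the running interpreter, which is how an installer would
# rank candidate wheels.  The helper name is not part of the library.
def _sys_tags_sketch():
    # type: () -> None
    supported = list(sys_tags())
    best = supported[0]
    assert parse_tag(str(best)) <= frozenset(supported)  # round-trips via parse_tag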
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/tags.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/tags.py",
"repo_id": "Django-locallibrary",
"token_count": 10219
} | 27 |
"""Check a project and backend by attempting to build using PEP 517 hooks.
"""
import argparse
import logging
import os
from os.path import isfile, join as pjoin
from pip._vendor.toml import TomlDecodeError, load as toml_load
import shutil
from subprocess import CalledProcessError
import sys
import tarfile
from tempfile import mkdtemp
import zipfile
from .colorlog import enable_colourful_output
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
log = logging.getLogger(__name__)
def check_build_sdist(hooks, build_sys_requires):
with BuildEnvironment() as env:
try:
env.pip_install(build_sys_requires)
log.info('Installed static build dependencies')
except CalledProcessError:
log.error('Failed to install static build dependencies')
return False
try:
reqs = hooks.get_requires_for_build_sdist({})
log.info('Got build requires: %s', reqs)
except Exception:
log.error('Failure in get_requires_for_build_sdist', exc_info=True)
return False
try:
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
except CalledProcessError:
log.error('Failed to install dynamic build dependencies')
return False
td = mkdtemp()
log.info('Trying to build sdist in %s', td)
try:
try:
filename = hooks.build_sdist(td, {})
log.info('build_sdist returned %r', filename)
except Exception:
log.info('Failure in build_sdist', exc_info=True)
return False
if not filename.endswith('.tar.gz'):
log.error(
"Filename %s doesn't have .tar.gz extension", filename)
return False
path = pjoin(td, filename)
if isfile(path):
log.info("Output file %s exists", path)
else:
log.error("Output file %s does not exist", path)
return False
if tarfile.is_tarfile(path):
log.info("Output file is a tar file")
else:
log.error("Output file is not a tar file")
return False
finally:
shutil.rmtree(td)
return True
def check_build_wheel(hooks, build_sys_requires):
with BuildEnvironment() as env:
try:
env.pip_install(build_sys_requires)
log.info('Installed static build dependencies')
except CalledProcessError:
log.error('Failed to install static build dependencies')
return False
try:
reqs = hooks.get_requires_for_build_wheel({})
log.info('Got build requires: %s', reqs)
except Exception:
            log.error('Failure in get_requires_for_build_wheel', exc_info=True)
return False
try:
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
except CalledProcessError:
log.error('Failed to install dynamic build dependencies')
return False
td = mkdtemp()
log.info('Trying to build wheel in %s', td)
try:
try:
filename = hooks.build_wheel(td, {})
log.info('build_wheel returned %r', filename)
except Exception:
log.info('Failure in build_wheel', exc_info=True)
return False
if not filename.endswith('.whl'):
log.error("Filename %s doesn't have .whl extension", filename)
return False
path = pjoin(td, filename)
if isfile(path):
log.info("Output file %s exists", path)
else:
log.error("Output file %s does not exist", path)
return False
if zipfile.is_zipfile(path):
log.info("Output file is a zip file")
else:
log.error("Output file is not a zip file")
return False
finally:
shutil.rmtree(td)
return True
def check(source_dir):
pyproject = pjoin(source_dir, 'pyproject.toml')
if isfile(pyproject):
log.info('Found pyproject.toml')
else:
log.error('Missing pyproject.toml')
return False
try:
with open(pyproject) as f:
pyproject_data = toml_load(f)
# Ensure the mandatory data can be loaded
buildsys = pyproject_data['build-system']
requires = buildsys['requires']
backend = buildsys['build-backend']
backend_path = buildsys.get('backend-path')
log.info('Loaded pyproject.toml')
except (TomlDecodeError, KeyError):
log.error("Invalid pyproject.toml", exc_info=True)
return False
hooks = Pep517HookCaller(source_dir, backend, backend_path)
sdist_ok = check_build_sdist(hooks, requires)
wheel_ok = check_build_wheel(hooks, requires)
if not sdist_ok:
log.warning('Sdist checks failed; scroll up to see')
if not wheel_ok:
log.warning('Wheel checks failed')
return sdist_ok
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument(
'source_dir',
help="A directory containing pyproject.toml")
args = ap.parse_args(argv)
enable_colourful_output()
ok = check(args.source_dir)
if ok:
print(ansi('Checks passed', 'green'))
else:
print(ansi('Checks failed', 'red'))
sys.exit(1)
ansi_codes = {
'reset': '\x1b[0m',
'bold': '\x1b[1m',
'red': '\x1b[31m',
'green': '\x1b[32m',
}
def ansi(s, attr):
if os.name != 'nt' and sys.stdout.isatty():
return ansi_codes[attr] + str(s) + ansi_codes['reset']
else:
return str(s)
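# --- usage sketch (not part of pep517) ----------------------------------------
# Hedged illustration: check() expects *source_dir* to contain a pyproject.toml
# declaring the build system, along the lines of (backend and requirements are
# made up for the example):
#
#     [build-system]
#     requires = ["setuptools >= 40.8", "wheel"]
#     build-backend = "setuptools.build_meta"
#
# It can then be driven programmatically; the path and helper name below are
# hypothetical.
def _check_sketch():
    return check("path/to/project")  # True if the sdist hook checks succeeded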
if __name__ == '__main__':
main()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/pep517/check.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/pep517/check.py",
"repo_id": "Django-locallibrary",
"token_count": 2763
} | 28 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
import sys
from . import Progress
class Bar(Progress):
width = 32
suffix = '%(index)d/%(max)d'
bar_prefix = ' |'
bar_suffix = '| '
empty_fill = ' '
fill = '#'
def update(self):
filled_length = int(self.width * self.progress)
empty_length = self.width - filled_length
message = self.message % self
bar = self.fill * filled_length
empty = self.empty_fill * empty_length
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
suffix])
self.writeln(line)
class ChargingBar(Bar):
suffix = '%(percent)d%%'
bar_prefix = ' '
bar_suffix = ' '
empty_fill = '∙'
fill = '█'
class FillingSquaresBar(ChargingBar):
empty_fill = '▢'
fill = '▣'
class FillingCirclesBar(ChargingBar):
empty_fill = '◯'
fill = '◉'
class IncrementalBar(Bar):
if sys.platform.startswith('win'):
phases = (u' ', u'▌', u'█')
else:
phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
def update(self):
nphases = len(self.phases)
filled_len = self.width * self.progress
nfull = int(filled_len) # Number of full chars
phase = int((filled_len - nfull) * nphases) # Phase of last char
nempty = self.width - nfull # Number of empty chars
message = self.message % self
bar = self.phases[-1] * nfull
current = self.phases[phase] if phase > 0 else ''
empty = self.empty_fill * max(0, nempty - len(current))
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, current, empty,
self.bar_suffix, suffix])
self.writeln(line)
class PixelBar(IncrementalBar):
phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿')
class ShadyBar(IncrementalBar):
phases = (' ', '░', '▒', '▓', '█')
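# --- usage sketch (not part of progress) --------------------------------------
# Hedged illustration of the bar classes above; the label and iteration count
# are made up and the helper name is not part of the library.  Output is drawn
# on the controlling terminal.
def _bar_sketch():
    bar = IncrementalBar('Processing', max=20)
    for _ in range(20):
        # ... do one unit of work here ...
        bar.next()
    bar.finish()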
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/progress/bar.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/progress/bar.py",
"repo_id": "Django-locallibrary",
"token_count": 1183
} | 29 |
"""Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
from pip._vendor import idna
from pip._vendor import urllib3
from pip._vendor import chardet
from . import __version__ as requests_version
try:
from pip._vendor.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/help.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/help.py",
"repo_id": "Django-locallibrary",
"token_count": 1456
} | 30 |
import datetime
import io
from os import linesep
import re
import sys
from pip._vendor.toml.tz import TomlTz
if sys.version_info < (3,):
_range = xrange # noqa: F821
else:
unicode = str
_range = range
basestring = str
unichr = chr
def _detect_pathlib_path(p):
if (3, 4) <= sys.version_info:
import pathlib
if isinstance(p, pathlib.PurePath):
return True
return False
def _ispath(p):
if isinstance(p, (bytes, basestring)):
return True
return _detect_pathlib_path(p)
def _getpath(p):
if (3, 6) <= sys.version_info:
import os
return os.fspath(p)
if _detect_pathlib_path(p):
return str(p)
return p
try:
FNFError = FileNotFoundError
except NameError:
FNFError = IOError
TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
class TomlDecodeError(ValueError):
"""Base toml Exception / Error."""
def __init__(self, msg, doc, pos):
lineno = doc.count('\n', 0, pos) + 1
colno = pos - doc.rfind('\n', 0, pos)
emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos)
ValueError.__init__(self, emsg)
self.msg = msg
self.doc = doc
self.pos = pos
self.lineno = lineno
self.colno = colno
# Matches a TOML number, which allows underscores for readability
_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
class CommentValue(object):
def __init__(self, val, comment, beginline, _dict):
self.val = val
separator = "\n" if beginline else " "
self.comment = separator + comment
self._dict = _dict
def __getitem__(self, key):
return self.val[key]
def __setitem__(self, key, value):
self.val[key] = value
def dump(self, dump_value_func):
retstr = dump_value_func(self.val)
if isinstance(self.val, self._dict):
return self.comment + "\n" + unicode(retstr)
else:
return unicode(retstr) + self.comment
def _strictly_valid_num(n):
n = n.strip()
if not n:
return False
if n[0] == '_':
return False
if n[-1] == '_':
return False
if "_." in n or "._" in n:
return False
if len(n) == 1:
return True
if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
return False
if n[0] == '+' or n[0] == '-':
n = n[1:]
if len(n) > 1 and n[0] == '0' and n[1] != '.':
return False
if '__' in n:
return False
return True
def load(f, _dict=dict, decoder=None):
"""Parses named file or files as toml and returns a dictionary
Args:
f: Path to the file to open, array of files to read into single dict
or a file descriptor
_dict: (optional) Specifies the class of the returned toml dictionary
decoder: The decoder to use
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError -- When f is invalid type
TomlDecodeError: Error while decoding toml
IOError / FileNotFoundError -- When an array with no valid (existing)
(Python 2 / Python 3) file paths is passed
"""
if _ispath(f):
with io.open(_getpath(f), encoding='utf-8') as ffile:
return loads(ffile.read(), _dict, decoder)
elif isinstance(f, list):
from os import path as op
from warnings import warn
if not [path for path in f if op.exists(path)]:
error_msg = "Load expects a list to contain filenames only."
error_msg += linesep
error_msg += ("The list needs to contain the path of at least one "
"existing file.")
raise FNFError(error_msg)
if decoder is None:
decoder = TomlDecoder(_dict)
d = decoder.get_empty_table()
for l in f: # noqa: E741
if op.exists(l):
d.update(load(l, _dict, decoder))
else:
warn("Non-existent filename in list with at least one valid "
"filename")
return d
else:
try:
return loads(f.read(), _dict, decoder)
except AttributeError:
raise TypeError("You can only load a file descriptor, filename or "
"list")
_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
def loads(s, _dict=dict, decoder=None):
"""Parses string as toml
Args:
s: String to be parsed
_dict: (optional) Specifies the class of the returned toml dictionary
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError: When a non-string is passed
TomlDecodeError: Error while decoding toml
"""
implicitgroups = []
if decoder is None:
decoder = TomlDecoder(_dict)
retval = decoder.get_empty_table()
currentlevel = retval
if not isinstance(s, basestring):
raise TypeError("Expecting something like a string")
if not isinstance(s, unicode):
s = s.decode('utf8')
original = s
sl = list(s)
openarr = 0
openstring = False
openstrchar = ""
multilinestr = False
arrayoftables = False
beginline = True
keygroup = False
dottedkey = False
keyname = 0
key = ''
prev_key = ''
line_no = 1
for i, item in enumerate(sl):
if item == '\r' and sl[i + 1] == '\n':
sl[i] = ' '
continue
if keyname:
key += item
if item == '\n':
raise TomlDecodeError("Key name found without value."
" Reached end of line.", original, i)
if openstring:
if item == openstrchar:
oddbackslash = False
k = 1
while i >= k and sl[i - k] == '\\':
oddbackslash = not oddbackslash
k += 1
if not oddbackslash:
keyname = 2
openstring = False
openstrchar = ""
continue
elif keyname == 1:
if item.isspace():
keyname = 2
continue
elif item == '.':
dottedkey = True
continue
elif item.isalnum() or item == '_' or item == '-':
continue
elif (dottedkey and sl[i - 1] == '.' and
(item == '"' or item == "'")):
openstring = True
openstrchar = item
continue
elif keyname == 2:
if item.isspace():
if dottedkey:
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '.':
dottedkey = True
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '=':
keyname = 0
prev_key = key[:-1].rstrip()
key = ''
dottedkey = False
else:
raise TomlDecodeError("Found invalid character in key name: '" +
item + "'. Try quoting the key name.",
original, i)
if item == "'" and openstrchar != '"':
k = 1
try:
while sl[i - k] == "'":
k += 1
if k == 3:
break
except IndexError:
pass
if k == 3:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = "'"
else:
openstrchar = ""
if item == '"' and openstrchar != "'":
oddbackslash = False
k = 1
tripquote = False
try:
while sl[i - k] == '"':
k += 1
if k == 3:
tripquote = True
break
if k == 1 or (k == 3 and tripquote):
while sl[i - k] == '\\':
oddbackslash = not oddbackslash
k += 1
except IndexError:
pass
if not oddbackslash:
if tripquote:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = '"'
else:
openstrchar = ""
if item == '#' and (not openstring and not keygroup and
not arrayoftables):
j = i
comment = ""
try:
while sl[j] != '\n':
comment += s[j]
sl[j] = ' '
j += 1
except IndexError:
break
if not openarr:
decoder.preserve_comment(line_no, prev_key, comment, beginline)
if item == '[' and (not openstring and not keygroup and
not arrayoftables):
if beginline:
if len(sl) > i + 1 and sl[i + 1] == '[':
arrayoftables = True
else:
keygroup = True
else:
openarr += 1
if item == ']' and not openstring:
if keygroup:
keygroup = False
elif arrayoftables:
if sl[i - 1] == ']':
arrayoftables = False
else:
openarr -= 1
if item == '\n':
if openstring or multilinestr:
if not multilinestr:
raise TomlDecodeError("Unbalanced quotes", original, i)
if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
sl[i - 2] == sl[i - 1])):
sl[i] = sl[i - 1]
if sl[i - 3] == sl[i - 1]:
sl[i - 3] = ' '
elif openarr:
sl[i] = ' '
else:
beginline = True
line_no += 1
elif beginline and sl[i] != ' ' and sl[i] != '\t':
beginline = False
if not keygroup and not arrayoftables:
if sl[i] == '=':
raise TomlDecodeError("Found empty keyname. ", original, i)
keyname = 1
key += item
if keyname:
raise TomlDecodeError("Key name found without value."
" Reached end of file.", original, len(s))
if openstring: # reached EOF and have an unterminated string
raise TomlDecodeError("Unterminated string found."
" Reached end of file.", original, len(s))
s = ''.join(sl)
s = s.split('\n')
multikey = None
multilinestr = ""
multibackslash = False
pos = 0
for idx, line in enumerate(s):
if idx > 0:
pos += len(s[idx - 1]) + 1
decoder.embed_comments(idx, currentlevel)
if not multilinestr or multibackslash or '\n' not in multilinestr:
line = line.strip()
if line == "" and (not multikey or multibackslash):
continue
if multikey:
if multibackslash:
multilinestr += line
else:
multilinestr += line
multibackslash = False
closed = False
if multilinestr[0] == '[':
closed = line[-1] == ']'
elif len(line) > 2:
closed = (line[-1] == multilinestr[0] and
line[-2] == multilinestr[0] and
line[-3] == multilinestr[0])
if closed:
try:
value, vtype = decoder.load_value(multilinestr)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
currentlevel[multikey] = value
multikey = None
multilinestr = ""
else:
k = len(multilinestr) - 1
while k > -1 and multilinestr[k] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
multilinestr = multilinestr[:-1]
else:
multilinestr += "\n"
continue
if line[0] == '[':
arrayoftables = False
if len(line) == 1:
raise TomlDecodeError("Opening key group bracket on line by "
"itself.", original, pos)
if line[1] == '[':
arrayoftables = True
line = line[2:]
splitstr = ']]'
else:
line = line[1:]
splitstr = ']'
i = 1
quotesplits = decoder._get_split_on_quotes(line)
quoted = False
for quotesplit in quotesplits:
if not quoted and splitstr in quotesplit:
break
i += quotesplit.count(splitstr)
quoted = not quoted
line = line.split(splitstr, i)
if len(line) < i + 1 or line[-1].strip() != "":
raise TomlDecodeError("Key group not on a line by itself.",
original, pos)
groups = splitstr.join(line[:-1]).split('.')
i = 0
while i < len(groups):
groups[i] = groups[i].strip()
if len(groups[i]) > 0 and (groups[i][0] == '"' or
groups[i][0] == "'"):
groupstr = groups[i]
j = i + 1
while not groupstr[0] == groupstr[-1]:
j += 1
if j > len(groups) + 2:
raise TomlDecodeError("Invalid group name '" +
groupstr + "' Something " +
"went wrong.", original, pos)
groupstr = '.'.join(groups[i:j]).strip()
groups[i] = groupstr[1:-1]
groups[i + 1:j] = []
else:
if not _groupname_re.match(groups[i]):
raise TomlDecodeError("Invalid group name '" +
groups[i] + "'. Try quoting it.",
original, pos)
i += 1
currentlevel = retval
for i in _range(len(groups)):
group = groups[i]
if group == "":
raise TomlDecodeError("Can't have a keygroup with an empty "
"name", original, pos)
try:
currentlevel[group]
if i == len(groups) - 1:
if group in implicitgroups:
implicitgroups.remove(group)
if arrayoftables:
raise TomlDecodeError("An implicitly defined "
"table can't be an array",
original, pos)
elif arrayoftables:
currentlevel[group].append(decoder.get_empty_table()
)
else:
raise TomlDecodeError("What? " + group +
" already exists?" +
str(currentlevel),
original, pos)
except TypeError:
currentlevel = currentlevel[-1]
if group not in currentlevel:
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
except KeyError:
if i != len(groups) - 1:
implicitgroups.append(group)
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
currentlevel = currentlevel[group]
if arrayoftables:
try:
currentlevel = currentlevel[-1]
except KeyError:
pass
elif line[0] == "{":
if line[-1] != "}":
                raise TomlDecodeError("Line breaks are not allowed in inline "
                                      "objects", original, pos)
try:
decoder.load_inline_object(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
elif "=" in line:
try:
ret = decoder.load_line(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
if ret is not None:
multikey, multilinestr, multibackslash = ret
return retval
def _load_date(val):
microsecond = 0
tz = None
try:
if len(val) > 19:
if val[19] == '.':
if val[-1].upper() == 'Z':
subsecondval = val[20:-1]
tzval = "Z"
else:
subsecondvalandtz = val[20:]
if '+' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('+')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
elif '-' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('-')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
else:
tzval = None
subsecondval = subsecondvalandtz
if tzval is not None:
tz = TomlTz(tzval)
microsecond = int(int(subsecondval) *
(10 ** (6 - len(subsecondval))))
else:
tz = TomlTz(val[19:])
except ValueError:
tz = None
if "-" not in val[1:]:
return None
try:
if len(val) == 10:
d = datetime.date(
int(val[:4]), int(val[5:7]),
int(val[8:10]))
else:
d = datetime.datetime(
int(val[:4]), int(val[5:7]),
int(val[8:10]), int(val[11:13]),
int(val[14:16]), int(val[17:19]), microsecond, tz)
except ValueError:
return None
return d
def _load_unicode_escapes(v, hexbytes, prefix):
skip = False
i = len(v) - 1
while i > -1 and v[i] == '\\':
skip = not skip
i -= 1
for hx in hexbytes:
if skip:
skip = False
i = len(hx) - 1
while i > -1 and hx[i] == '\\':
skip = not skip
i -= 1
v += prefix
v += hx
continue
hxb = ""
i = 0
hxblen = 4
if prefix == "\\U":
hxblen = 8
hxb = ''.join(hx[i:i + hxblen]).lower()
if hxb.strip('0123456789abcdef'):
raise ValueError("Invalid escape sequence: " + hxb)
if hxb[0] == "d" and hxb[1].strip('01234567'):
raise ValueError("Invalid escape sequence: " + hxb +
". Only scalar unicode points are allowed.")
v += unichr(int(hxb, 16))
v += unicode(hx[len(hxb):])
return v
# Unescape TOML string values.
# content after the \
_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
# What it should be replaced by
_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
# Used for substitution
_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
def _unescape(v):
"""Unescape characters in a TOML string."""
i = 0
backslash = False
while i < len(v):
if backslash:
backslash = False
if v[i] in _escapes:
v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
elif v[i] == '\\':
v = v[:i - 1] + v[i:]
elif v[i] == 'u' or v[i] == 'U':
i += 1
else:
raise ValueError("Reserved escape sequence used")
continue
elif v[i] == '\\':
backslash = True
i += 1
return v
class InlineTableDict(object):
"""Sentinel subclass of dict for inline tables."""
class TomlDecoder(object):
def __init__(self, _dict=dict):
self._dict = _dict
def get_empty_table(self):
return self._dict()
def get_empty_inline_table(self):
class DynamicInlineTableDict(self._dict, InlineTableDict):
"""Concrete sentinel subclass for inline tables.
It is a subclass of _dict which is passed in dynamically at load
time
It is also a subclass of InlineTableDict
"""
return DynamicInlineTableDict()
def load_inline_object(self, line, currentlevel, multikey=False,
multibackslash=False):
candidate_groups = line[1:-1].split(",")
groups = []
if len(candidate_groups) == 1 and not candidate_groups[0].strip():
candidate_groups.pop()
while len(candidate_groups) > 0:
candidate_group = candidate_groups.pop(0)
try:
_, value = candidate_group.split('=', 1)
except ValueError:
raise ValueError("Invalid inline table encountered")
value = value.strip()
if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
value[0] in '-0123456789' or
value in ('true', 'false') or
(value[0] == "[" and value[-1] == "]") or
(value[0] == '{' and value[-1] == '}'))):
groups.append(candidate_group)
elif len(candidate_groups) > 0:
candidate_groups[0] = (candidate_group + "," +
candidate_groups[0])
else:
raise ValueError("Invalid inline table value encountered")
for group in groups:
status = self.load_line(group, currentlevel, multikey,
multibackslash)
if status is not None:
break
def _get_split_on_quotes(self, line):
doublequotesplits = line.split('"')
quoted = False
quotesplits = []
if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
singlequotesplits = doublequotesplits[0].split("'")
doublequotesplits = doublequotesplits[1:]
while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
singlequotesplits[-1] += '"' + doublequotesplits[0]
doublequotesplits = doublequotesplits[1:]
if "'" in singlequotesplits[-1]:
singlequotesplits = (singlequotesplits[:-1] +
singlequotesplits[-1].split("'"))
quotesplits += singlequotesplits
for doublequotesplit in doublequotesplits:
if quoted:
quotesplits.append(doublequotesplit)
else:
quotesplits += doublequotesplit.split("'")
quoted = not quoted
return quotesplits
def load_line(self, line, currentlevel, multikey, multibackslash):
i = 1
quotesplits = self._get_split_on_quotes(line)
quoted = False
for quotesplit in quotesplits:
if not quoted and '=' in quotesplit:
break
i += quotesplit.count('=')
quoted = not quoted
pair = line.split('=', i)
strictly_valid = _strictly_valid_num(pair[-1])
if _number_with_underscores.match(pair[-1]):
pair[-1] = pair[-1].replace('_', '')
while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
pair[-1][0] != "'" and pair[-1][0] != '"' and
pair[-1][0] != '[' and pair[-1][0] != '{' and
pair[-1].strip() != 'true' and
pair[-1].strip() != 'false'):
try:
float(pair[-1])
break
except ValueError:
pass
if _load_date(pair[-1]) is not None:
break
if TIME_RE.match(pair[-1]):
break
i += 1
prev_val = pair[-1]
pair = line.split('=', i)
if prev_val == pair[-1]:
raise ValueError("Invalid date or number")
if strictly_valid:
strictly_valid = _strictly_valid_num(pair[-1])
pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
if '.' in pair[0]:
if '"' in pair[0] or "'" in pair[0]:
quotesplits = self._get_split_on_quotes(pair[0])
quoted = False
levels = []
for quotesplit in quotesplits:
if quoted:
levels.append(quotesplit)
else:
levels += [level.strip() for level in
quotesplit.split('.')]
quoted = not quoted
else:
levels = pair[0].split('.')
while levels[-1] == "":
levels = levels[:-1]
for level in levels[:-1]:
if level == "":
continue
if level not in currentlevel:
currentlevel[level] = self.get_empty_table()
currentlevel = currentlevel[level]
pair[0] = levels[-1].strip()
elif (pair[0][0] == '"' or pair[0][0] == "'") and \
(pair[0][-1] == pair[0][0]):
pair[0] = _unescape(pair[0][1:-1])
k, koffset = self._load_line_multiline_str(pair[1])
if k > -1:
while k > -1 and pair[1][k + koffset] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
multilinestr = pair[1][:-1]
else:
multilinestr = pair[1] + "\n"
multikey = pair[0]
else:
value, vtype = self.load_value(pair[1], strictly_valid)
try:
currentlevel[pair[0]]
raise ValueError("Duplicate keys!")
except TypeError:
raise ValueError("Duplicate keys!")
except KeyError:
if multikey:
return multikey, multilinestr, multibackslash
else:
currentlevel[pair[0]] = value
def _load_line_multiline_str(self, p):
poffset = 0
if len(p) < 3:
return -1, poffset
if p[0] == '[' and (p.strip()[-1] != ']' and
self._load_array_isstrarray(p)):
newp = p[1:].strip().split(',')
while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
newp = newp[-1]
poffset = len(p) - len(newp)
p = newp
if p[0] != '"' and p[0] != "'":
return -1, poffset
if p[1] != p[0] or p[2] != p[0]:
return -1, poffset
if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
return -1, poffset
return len(p) - 1, poffset
def load_value(self, v, strictly_valid=True):
if not v:
raise ValueError("Empty value is invalid")
if v == 'true':
return (True, "bool")
elif v == 'false':
return (False, "bool")
elif v[0] == '"' or v[0] == "'":
quotechar = v[0]
testv = v[1:].split(quotechar)
triplequote = False
triplequotecount = 0
if len(testv) > 1 and testv[0] == '' and testv[1] == '':
testv = testv[2:]
triplequote = True
closed = False
for tv in testv:
if tv == '':
if triplequote:
triplequotecount += 1
else:
closed = True
else:
oddbackslash = False
try:
i = -1
j = tv[i]
while j == '\\':
oddbackslash = not oddbackslash
i -= 1
j = tv[i]
except IndexError:
pass
if not oddbackslash:
if closed:
raise ValueError("Found tokens after a closed " +
"string. Invalid TOML.")
else:
if not triplequote or triplequotecount > 1:
closed = True
else:
triplequotecount = 0
if quotechar == '"':
escapeseqs = v.split('\\')[1:]
backslash = False
for i in escapeseqs:
if i == '':
backslash = not backslash
else:
if i[0] not in _escapes and (i[0] != 'u' and
i[0] != 'U' and
not backslash):
raise ValueError("Reserved escape sequence used")
if backslash:
backslash = False
for prefix in ["\\u", "\\U"]:
if prefix in v:
hexbytes = v.split(prefix)
v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
prefix)
v = _unescape(v)
if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
v[1] == v[2]):
v = v[2:-2]
return (v[1:-1], "str")
elif v[0] == '[':
return (self.load_array(v), "array")
elif v[0] == '{':
inline_object = self.get_empty_inline_table()
self.load_inline_object(v, inline_object)
return (inline_object, "inline_object")
elif TIME_RE.match(v):
h, m, s, _, ms = TIME_RE.match(v).groups()
time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
return (time, "time")
else:
parsed_date = _load_date(v)
if parsed_date is not None:
return (parsed_date, "date")
if not strictly_valid:
raise ValueError("Weirdness with leading zeroes or "
"underscores in your number.")
itype = "int"
neg = False
if v[0] == '-':
neg = True
v = v[1:]
elif v[0] == '+':
v = v[1:]
v = v.replace('_', '')
lowerv = v.lower()
if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
if '.' in v and v.split('.', 1)[1] == '':
raise ValueError("This float is missing digits after "
"the point")
if v[0] not in '0123456789':
raise ValueError("This float doesn't have a leading "
"digit")
v = float(v)
itype = "float"
elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
v = float(v)
itype = "float"
if itype == "int":
v = int(v, 0)
if neg:
return (0 - v, itype)
return (v, itype)
def bounded_string(self, s):
if len(s) == 0:
return True
if s[-1] != s[0]:
return False
i = -2
backslash = False
while len(s) + i > 0:
if s[i] == "\\":
backslash = not backslash
i -= 1
else:
break
return not backslash
def _load_array_isstrarray(self, a):
a = a[1:-1].strip()
if a != '' and (a[0] == '"' or a[0] == "'"):
return True
return False
def load_array(self, a):
atype = None
retval = []
a = a.strip()
if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
strarray = self._load_array_isstrarray(a)
if not a[1:-1].strip().startswith('{'):
a = a[1:-1].split(',')
else:
# a is an inline object, we must find the matching parenthesis
# to define groups
new_a = []
start_group_index = 1
end_group_index = 2
open_bracket_count = 1 if a[start_group_index] == '{' else 0
in_str = False
while end_group_index < len(a[1:]):
if a[end_group_index] == '"' or a[end_group_index] == "'":
if in_str:
backslash_index = end_group_index - 1
while (backslash_index > -1 and
a[backslash_index] == '\\'):
in_str = not in_str
backslash_index -= 1
in_str = not in_str
if not in_str and a[end_group_index] == '{':
open_bracket_count += 1
if in_str or a[end_group_index] != '}':
end_group_index += 1
continue
elif a[end_group_index] == '}' and open_bracket_count > 1:
open_bracket_count -= 1
end_group_index += 1
continue
# Increase end_group_index by 1 to get the closing bracket
end_group_index += 1
new_a.append(a[start_group_index:end_group_index])
# The next start index is at least after the closing
# bracket, a closing bracket can be followed by a comma
# since we are in an array.
start_group_index = end_group_index + 1
while (start_group_index < len(a[1:]) and
a[start_group_index] != '{'):
start_group_index += 1
end_group_index = start_group_index + 1
a = new_a
b = 0
if strarray:
while b < len(a) - 1:
ab = a[b].strip()
while (not self.bounded_string(ab) or
(len(ab) > 2 and
ab[0] == ab[1] == ab[2] and
ab[-2] != ab[0] and
ab[-3] != ab[0])):
a[b] = a[b] + ',' + a[b + 1]
ab = a[b].strip()
if b < len(a) - 2:
a = a[:b + 1] + a[b + 2:]
else:
a = a[:b + 1]
b += 1
else:
al = list(a[1:-1])
a = []
openarr = 0
j = 0
for i in _range(len(al)):
if al[i] == '[':
openarr += 1
elif al[i] == ']':
openarr -= 1
elif al[i] == ',' and not openarr:
a.append(''.join(al[j:i]))
j = i + 1
a.append(''.join(al[j:]))
for i in _range(len(a)):
a[i] = a[i].strip()
if a[i] != '':
nval, ntype = self.load_value(a[i])
if atype:
if ntype != atype:
raise ValueError("Not a homogeneous array")
else:
atype = ntype
retval.append(nval)
return retval
def preserve_comment(self, line_no, key, comment, beginline):
pass
def embed_comments(self, idx, currentlevel):
pass
class TomlPreserveCommentDecoder(TomlDecoder):
def __init__(self, _dict=dict):
self.saved_comments = {}
super(TomlPreserveCommentDecoder, self).__init__(_dict)
def preserve_comment(self, line_no, key, comment, beginline):
self.saved_comments[line_no] = (key, comment, beginline)
def embed_comments(self, idx, currentlevel):
if idx not in self.saved_comments:
return
key, comment, beginline = self.saved_comments[idx]
currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
self._dict)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/toml/decoder.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/toml/decoder.py",
"repo_id": "Django-locallibrary",
"token_count": 22539
} | 31 |
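A minimal usage sketch of the decoder above, assuming the same module is importable as the standalone toml package (here it is vendored under pip._vendor.toml) and that the package exposes loads and TomlPreserveCommentDecoder at the top level; the TOML snippet and key names are illustrative only.

import toml

doc = '''
[server]
host = "127.0.0.1"   # an inline comment
ports = [8000, 8001]
'''

cfg = toml.loads(doc)                         # plain dict by default
assert cfg["server"]["ports"] == [8000, 8001]

# Swap in the comment-aware decoder defined at the end of the file above.
cfg2 = toml.loads(doc, decoder=toml.TomlPreserveCommentDecoder())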
from __future__ import absolute_import
import re
import datetime
import logging
import os
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try:
# Python 3: not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError:
# Python 2
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket,
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)
port_by_scheme = {"http": 80, "https": 443}
# When it comes time to update this value as a part of regular maintenance
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2019, 1, 1)
_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme["http"]
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if not six.PY2:
kw.pop("strict", None)
# Pre-set source_address.
self.source_address = kw.get("source_address")
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop("socket_options", self.default_socket_options)
_HTTPConnection.__init__(self, *args, **kw)
@property
def host(self):
"""
Getter method to remove any trailing dots that indicate the hostname is an FQDN.
In general, SSL certificates don't include the trailing dot indicating a
fully-qualified domain name, and thus, they don't validate properly when
checked against a domain name that includes the dot. In addition, some
servers may not expect to receive the trailing dot when provided.
However, the hostname with trailing dot is critical to DNS resolution; doing a
lookup with the trailing dot will properly only resolve the appropriate FQDN,
whereas a lookup without a trailing dot will search the system's search domain
list. Thus, it's important to keep the original host around for use only in
those cases where it's appropriate (i.e., when doing DNS lookup to establish the
actual TCP connection across which we're going to send HTTP requests).
"""
return self._dns_host.rstrip(".")
@host.setter
def host(self, value):
"""
Setter for the `host` property.
We assume that only urllib3 uses the _dns_host attribute; httplib itself
only uses `host`, and it seems reasonable that other libraries follow suit.
"""
self._dns_host = value
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw["source_address"] = self.source_address
if self.socket_options:
extra_kw["socket_options"] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# Google App Engine's httplib does not define _tunnel_host
if getattr(self, "_tunnel_host", None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def putrequest(self, method, url, *args, **kwargs):
"""Send a request to the server"""
match = _CONTAINS_CONTROL_CHAR_RE.search(method)
if match:
raise ValueError(
"Method cannot contain non-token characters %r (found at least %r)"
% (method, match.group())
)
return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = "accept-encoding" in headers
skip_host = "host" in headers
self.putrequest(
method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
)
for header, value in headers.items():
self.putheader(header, value)
if "transfer-encoding" not in headers:
self.putheader("Transfer-Encoding", "chunked")
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode("utf8")
len_str = hex(len(chunk))[2:]
self.send(len_str.encode("utf-8"))
self.send(b"\r\n")
self.send(chunk)
self.send(b"\r\n")
# After the if clause, to always have a closed body
self.send(b"0\r\n\r\n")
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme["https"]
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ca_cert_data = None
ssl_version = None
assert_fingerprint = None
def __init__(
self,
host,
port=None,
key_file=None,
cert_file=None,
key_password=None,
strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
ssl_context=None,
server_hostname=None,
**kw
):
HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
self.key_password = key_password
self.ssl_context = ssl_context
self.server_hostname = server_hostname
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = "https"
def set_cert(
self,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
ca_cert_data=None,
):
"""
This method should only be called once, before the connection is used.
"""
# If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
# have an SSLContext object in which case we'll use its verify_mode.
if cert_reqs is None:
if self.ssl_context is not None:
cert_reqs = self.ssl_context.verify_mode
else:
cert_reqs = resolve_cert_reqs(None)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
self.ca_cert_data = ca_cert_data
def connect(self):
# Add certificate verification
conn = self._new_conn()
hostname = self.host
# Google App Engine's httplib does not define _tunnel_host
if getattr(self, "_tunnel_host", None):
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
server_hostname = hostname
if self.server_hostname is not None:
server_hostname = self.server_hostname
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn(
(
"System time is way off (before {0}). This will probably "
"lead to SSL verification errors"
).format(RECENT_DATE),
SystemTimeWarning,
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
default_ssl_context = False
if self.ssl_context is None:
default_ssl_context = True
self.ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(self.ssl_version),
cert_reqs=resolve_cert_reqs(self.cert_reqs),
)
context = self.ssl_context
context.verify_mode = resolve_cert_reqs(self.cert_reqs)
# Try to load OS default certs if none are given.
# Works well on Windows (requires Python3.4+)
if (
not self.ca_certs
and not self.ca_cert_dir
and not self.ca_cert_data
and default_ssl_context
and hasattr(context, "load_default_certs")
):
context.load_default_certs()
self.sock = ssl_wrap_socket(
sock=conn,
keyfile=self.key_file,
certfile=self.cert_file,
key_password=self.key_password,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
ca_cert_data=self.ca_cert_data,
server_hostname=server_hostname,
ssl_context=context,
)
if self.assert_fingerprint:
assert_fingerprint(
self.sock.getpeercert(binary_form=True), self.assert_fingerprint
)
elif (
context.verify_mode != ssl.CERT_NONE
and not getattr(context, "check_hostname", False)
and self.assert_hostname is not False
):
# While urllib3 attempts to always turn off hostname matching from
# the TLS library, this cannot always be done. So we check whether
# the TLS Library still thinks it's matching hostnames.
cert = self.sock.getpeercert()
if not cert.get("subjectAltName", ()):
warnings.warn(
(
"Certificate for {0} has no `subjectAltName`, falling back to check for a "
"`commonName` for now. This feature is being removed by major browsers and "
"deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
"for details.)".format(hostname)
),
SubjectAltNameWarning,
)
_match_hostname(cert, self.assert_hostname or server_hostname)
self.is_verified = (
context.verify_mode == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None
)
def _match_hostname(cert, asserted_hostname):
try:
match_hostname(cert, asserted_hostname)
except CertificateError as e:
log.warning(
"Certificate did not match expected hostname: %s. Certificate: %s",
asserted_hostname,
cert,
)
# Add cert to exception and reraise so client code can inspect
# the cert when catching the exception, if they want to
e._peer_cert = cert
raise
if not ssl:
HTTPSConnection = DummyConnection # noqa: F811
VerifiedHTTPSConnection = HTTPSConnection
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/connection.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/connection.py",
"repo_id": "Django-locallibrary",
"token_count": 6246
} | 32 |
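A small sketch of driving these connection classes directly; real callers normally go through urllib3's connection pools, the host name is a placeholder, and the vendored import path is assumed.

from pip._vendor.urllib3.connection import HTTPSConnection

conn = HTTPSConnection("example.org", port=443, timeout=10)
conn.set_cert(cert_reqs="CERT_REQUIRED")   # resolved through resolve_cert_reqs()
conn.connect()                             # wraps the socket with TLS and verifies the peer
conn.request("GET", "/")
resp = conn.getresponse()
print(resp.status, conn.is_verified)
conn.close()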
"""
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import re
import os
import ssl
import tempfile
from .bindings import Security, CoreFoundation, CFConst
# This regular expression is used to grab PEM data out of a PEM bundle.
_PEM_CERTS_RE = re.compile(
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
"""
Given a bytestring, create a CFData object from it. This CFData object must
be CFReleased by the caller.
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
)
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
def _cf_string_to_unicode(value):
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
"""
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p, CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError("Error copying C string from CFStringRef")
string = buffer.value
if string is not None:
string = string.decode("utf-8")
return string
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u"":
output = u"OSStatus %s" % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
def _cert_array_from_pem(pem_bundle):
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
"""
# Normalize the PEM bundle's line endings.
pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
der_certs = [
base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
except Exception:
# We need to free the array before the exception bubbles further.
# We only want to do that if an error occurs: otherwise, the caller
# should free.
CoreFoundation.CFRelease(cert_array)
return cert_array
def _is_cert(item):
"""
Returns True if a given CFTypeRef is a certificate.
"""
expected = Security.SecCertificateGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _is_identity(item):
"""
Returns True if a given CFTypeRef is an identity.
"""
expected = Security.SecIdentityGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
"""
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
# This filename will be 8 random bytes expanded into base64. We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path, len(password), password, False, None, ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory
def _load_items_from_file(keychain, path):
"""
Given a single file, loads all the trust objects from it into arrays and
the keychain.
Returns a tuple of lists: the first list is a list of identities, the
second a list of certs.
"""
certificates = []
identities = []
result_array = None
with open(path, "rb") as f:
raw_filedata = f.read()
try:
filedata = CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
)
result_array = CoreFoundation.CFArrayRef()
result = Security.SecItemImport(
filedata, # cert data
None, # Filename, leaving it out for now
None, # What the type of the file is, we don't care
None, # what's in the file, we don't care
0, # import flags
None, # key params, can include passphrase in the future
keychain, # The keychain to insert into
ctypes.byref(result_array), # Results
)
_assert_no_error(result)
# A CFArray is not very useful to us as an intermediary
# representation, so we are going to extract the objects we want
# and then free the array. We don't need to keep hold of keys: the
# keychain already has them!
result_count = CoreFoundation.CFArrayGetCount(result_array)
for index in range(result_count):
item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
if _is_cert(item):
CoreFoundation.CFRetain(item)
certificates.append(item)
elif _is_identity(item):
CoreFoundation.CFRetain(item)
identities.append(item)
finally:
if result_array:
CoreFoundation.CFRelease(result_array)
CoreFoundation.CFRelease(filedata)
return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
"""
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
paths = (path for path in paths if path)
try:
for file_path in paths:
new_identities, new_certs = _load_items_from_file(keychain, file_path)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain, certificates[0], ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py",
"repo_id": "Django-locallibrary",
"token_count": 4489
} | 33 |
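The Security/CoreFoundation calls above only work on macOS, but the PEM handling they wrap is ordinary Python; below is a standalone sketch of the same splitting step used by _cert_array_from_pem, with the bundle path as a placeholder.

import base64
import re

_PEM_CERTS_RE = re.compile(
    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)

def pem_to_der_list(pem_bundle):
    """Return the DER bytes of every certificate in a PEM bundle."""
    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")   # normalize line endings first
    return [
        base64.b64decode(match.group(1))
        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    ]

with open("cacert.pem", "rb") as f:                   # placeholder path
    der_certs = pem_to_der_list(f.read())
print(len(der_certs), "certificates found")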
class SetuptoolsDeprecationWarning(Warning):
"""
Base class for warning deprecations in ``setuptools``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
| Django-locallibrary/env/Lib/site-packages/setuptools/_deprecation_warning.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_deprecation_warning.py",
"repo_id": "Django-locallibrary",
"token_count": 68
} | 34 |
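A short sketch of how a warning class like this is typically emitted and filtered; the deprecated helper is invented for the example, and the class is redefined so the snippet runs on its own.

import warnings

class SetuptoolsDeprecationWarning(Warning):
    """Stand-in with the same shape as the class above."""

def old_helper():
    warnings.warn(
        "old_helper() is deprecated; use new_helper() instead",
        SetuptoolsDeprecationWarning,
        stacklevel=2,
    )

old_helper()   # shown by default, unlike a plain DeprecationWarning

# Callers who want silence can still filter it explicitly:
warnings.filterwarnings("ignore", category=SetuptoolsDeprecationWarning)
old_helper()   # now suppressed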
"""distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
import os
from distutils.errors import \
DistutilsExecError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError as msg:
raise CompileError(msg)
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError("unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
print(msg)
raise CompileError(msg)
# preprocess()
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/bcppcompiler.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/bcppcompiler.py",
"repo_id": "Django-locallibrary",
"token_count": 7272
} | 35 |
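The Borland toolchain is rarely installed today, so a sketch of the shared CCompiler interface through the platform default compiler is more practical; the file names are placeholders, and passing compiler="bcpp" would only work with bcc32.exe, ilink32.exe and tlib.exe on PATH.

from distutils.ccompiler import new_compiler

cc = new_compiler()                      # pass compiler="bcpp" to force the class above
objects = cc.compile(["hello.c"], output_dir="build")
cc.link_executable(objects, "hello", output_dir="build")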
"""distutils.command.build
Implements the Distutils 'build' command."""
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build(Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
        ('build-lib=', None,
         "build directory for all distributions (defaults to either " +
         "build-purelib or build-platlib)"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('parallel=', 'j',
"number of parallel build jobs"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
self.parallel = None
def finalize_options(self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
# plat-name only supported for windows (other platforms are
# supported via ./configure flags, if at all). Avoid misleading
# other platforms.
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2])
# Make it so Python 2.x and Python 2.x with --with-pydebug don't
# share the same build directories. Doing so confuses the build
# process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
# them for a given distribution, though --
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-%d.%d' % sys.version_info[:2])
if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules(self):
return self.distribution.has_pure_modules()
def has_c_libraries(self):
return self.distribution.has_c_libraries()
def has_ext_modules(self):
return self.distribution.has_ext_modules()
def has_scripts(self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/build.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/build.py",
"repo_id": "Django-locallibrary",
"token_count": 2569
} | 36 |
"""
distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to a package
index).
"""
import os
import io
import hashlib
from base64 import standard_b64encode
from urllib.request import urlopen, Request, HTTPError
from urllib.parse import urlparse
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256)
# https://bugs.python.org/issue40698
_FILE_CONTENT_DIGESTS = {
"md5_digest": getattr(hashlib, "md5", None),
"sha256_digest": getattr(hashlib, "sha256", None),
"blake2_256_digest": getattr(hashlib, "blake2b", None),
}
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
msg = ("Must create and upload files in one command "
"(e.g. setup.py sdist upload)")
raise DistutilsOptionError(msg)
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protocol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
# additional meta-data
'metadata_version': '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
data['comment'] = ''
# file content digests
for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
if digest_cons is None:
continue
try:
data[digest_name] = digest_cons(content).hexdigest()
except ValueError:
# hash digest not available or blocked by security policy
pass
if self.sign:
with open(filename + ".asc", "rb") as f:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
f.read())
# set up the authentication
user_pass = (self.username + ":" + self.password).encode('ascii')
# The exact encoding of the authentication string is debated.
# In any case, PyPI only accepts ASCII for both username and password.
auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\r\n--' + boundary.encode('ascii')
end_boundary = sep_boundary + b'--\r\n'
body = io.BytesIO()
for key, value in data.items():
title = '\r\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = str(value).encode('utf-8')
body.write(sep_boundary)
body.write(title.encode('utf-8'))
body.write(b"\r\n\r\n")
body.write(value)
body.write(end_boundary)
body = body.getvalue()
msg = "Submitting %s to %s" % (filename, self.repository)
self.announce(msg, log.INFO)
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth,
}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
except HTTPError as e:
status = e.code
reason = e.msg
except OSError as e:
self.announce(str(e), log.ERROR)
raise
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
if self.show_response:
text = self._read_pypi_response(result)
msg = '\n'.join(('-' * 75, text, '-' * 75))
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (status, reason)
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/upload.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/upload.py",
"repo_id": "Django-locallibrary",
"token_count": 3587
} | 37 |
"""distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
import sys
import os
import subprocess
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.debug import DEBUG
from distutils import log
if sys.platform == 'darwin':
_cfg_target = None
_cfg_target_split = None
def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None):
"""Run another program, specified as a command list 'cmd', in a new process.
'cmd' is just the argument list for the new process, i.e.
cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
There is no way to run a program with a name different from that of its
executable.
If 'search_path' is true (the default), the system's executable
search path will be used to find the program; otherwise, cmd[0]
must be the exact path to the executable. If 'dry_run' is true,
the command will not actually be run.
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
# cmd is documented as a list, but just in case some code passes a tuple
# in, protect our %-formatting code against horrible death
cmd = list(cmd)
log.info(' '.join(cmd))
if dry_run:
return
if search_path:
executable = find_executable(cmd[0])
if executable is not None:
cmd[0] = executable
env = env if env is not None else dict(os.environ)
if sys.platform == 'darwin':
global _cfg_target, _cfg_target_split
if _cfg_target is None:
from distutils import sysconfig
_cfg_target = sysconfig.get_config_var(
'MACOSX_DEPLOYMENT_TARGET') or ''
if _cfg_target:
_cfg_target_split = [int(x) for x in _cfg_target.split('.')]
if _cfg_target:
# ensure that the deployment target of build process is not less
# than that used when the interpreter was built. This ensures
# extension modules are built with correct compatibility values
cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
'now "%s" but "%s" during configure'
% (cur_target, _cfg_target))
raise DistutilsPlatformError(my_msg)
env.update(MACOSX_DEPLOYMENT_TARGET=cur_target)
try:
proc = subprocess.Popen(cmd, env=env)
proc.wait()
exitcode = proc.returncode
except OSError as exc:
if not DEBUG:
cmd = cmd[0]
raise DistutilsExecError(
"command %r failed: %s" % (cmd, exc.args[-1])) from exc
if exitcode:
if not DEBUG:
cmd = cmd[0]
raise DistutilsExecError(
"command %r failed with exit code %s" % (cmd, exitcode))
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
_, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if os.path.isfile(executable):
return executable
if path is None:
path = os.environ.get('PATH', None)
if path is None:
try:
path = os.confstr("CS_PATH")
except (AttributeError, ValueError):
# os.confstr() or CS_PATH is not available
path = os.defpath
# bpo-35755: Don't use os.defpath if the PATH environment variable is
# set to an empty string
# PATH='' doesn't match, whereas PATH=':' looks in the current directory
if not path:
return None
paths = path.split(os.pathsep)
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/spawn.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/spawn.py",
"repo_id": "Django-locallibrary",
"token_count": 1797
} | 38 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
from .version import InvalidVersion, Version
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
def canonicalize_version(version):
"""
This is very similar to Version.__str__, but has one subtle difference
with the way it handles the release segment.
"""
try:
version = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
parts = []
# Epoch
if version.epoch != 0:
parts.append("{0}!".format(version.epoch))
# Release segment
# NB: This strips trailing '.0's to normalize
parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
# Pre-release
if version.pre is not None:
parts.append("".join(str(x) for x in version.pre))
# Post-release
if version.post is not None:
parts.append(".post{0}".format(version.post))
# Development release
if version.dev is not None:
parts.append(".dev{0}".format(version.dev))
# Local version segment
if version.local is not None:
parts.append("+{0}".format(version.local))
return "".join(parts)
| Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/utils.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/utils.py",
"repo_id": "Django-locallibrary",
"token_count": 541
} | 39 |
import os
import sys
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(sys, 'implementation'):
return
base = os.path.join(
'__pycache__', '__init__.' + sys.implementation.cache_tag)
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| Django-locallibrary/env/Lib/site-packages/setuptools/command/install_lib.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/install_lib.py",
"repo_id": "Django-locallibrary",
"token_count": 1718
} | 40 |
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
import sys
import re
import os
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
warn_distutils_present()
if enabled():
ensure_local_distutils()
| Django-locallibrary/env/Lib/site-packages/setuptools/distutils_patch.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/distutils_patch.py",
"repo_id": "Django-locallibrary",
"token_count": 640
} | 41 |
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
import platform
from setuptools.extern import six
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if six.PY2:
def get_all_headers(message, key): # noqa
return message.getheaders(key)
linux_py2_ascii = (
platform.system() == 'Linux' and
six.PY2
)
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
try:
from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from ._imp import get_frozen_object, get_module
except ImportError:
import imp
from imp import PY_COMPILED, PY_FROZEN, PY_SOURCE # noqa
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == imp.PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_frozen_object(module, paths):
return imp.get_frozen_object(module)
def get_module(module, paths, info):
imp.load_module(module, *info)
return sys.modules[module]
| Django-locallibrary/env/Lib/site-packages/setuptools/py27compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/py27compat.py",
"repo_id": "Django-locallibrary",
"token_count": 643
} | 42 |
//+------------------------------------------------------------------+
//| RealTimeStock Testing.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
#property tester_file "Predicted Apple Dataset.csv"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include <Trade\Trade.mqh>
#include <Trade\PositionInfo.mqh>
//---
CTrade m_trade;
CPositionInfo m_position;
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
input double Lots = 0.01;
//---
string file_name = "Predicted Apple Dataset.csv"; //csv file name
string delimiter = ",";
int Trend[];
string dates[];
datetime date_datetime[];
//---
#define PRINT_VAR(v) ""+#v+""
#define MAGIC_NUMBER 30052022
//+------------------------------------------------------------------+
//| Expert initialization function |
//+------------------------------------------------------------------+
int OnInit()
{
//---
GetColumnDatatoArray(1,Trend);
GetColumnDatatoArray(2,dates);
if (ArraySize(Trend) != ArraySize(dates))
{
printf("Unbalanced size between %s Array and %s Array",PRINT_VAR(Trend),PRINT_VAR(dates));
return(INIT_FAILED);
}
ConvertTimeToStandard();
if (MQLInfoInteger(MQL_TESTER))
Print("Testing DataSet Begins at ",date_datetime[0]);
//---
m_trade.SetExpertMagicNumber(MAGIC_NUMBER);
m_trade.SetTypeFillingBySymbol(Symbol());
m_trade.SetMarginMode();
return(INIT_SUCCEEDED);
}
//+------------------------------------------------------------------+
//| Expert deinitialization function |
//+------------------------------------------------------------------+
void OnDeinit(const int reason)
{
//---
if (reason == REASON_PARAMETERS)
{
GetColumnDatatoArray(1,Trend);
GetColumnDatatoArray(2,dates);
ConvertTimeToStandard();
}
else if (reason == REASON_RECOMPILE)
{
GetColumnDatatoArray(1,Trend);
GetColumnDatatoArray(2,dates);
ConvertTimeToStandard();
}
else
Comment("");
}
//+------------------------------------------------------------------+
//| Expert tick function |
//+------------------------------------------------------------------+
void OnTick()
{
//---
datetime today[1];
int trend_signal = -1; //1 is buy signal 0 is sell signal
CopyTime(Symbol(),PERIOD_D1,0,1,today);
if (isNewBar())
for (int i=0; i<ArraySize(date_datetime); i++)
{
if (today[0] == date_datetime[i]) //trade on that specific day only
{
if ((int)Trend[i] == 1)
trend_signal = 1;
else
trend_signal = 0;
// close all the existing positions since we are coming up with new data signals
ClosePosByType(POSITION_TYPE_BUY);
ClosePosByType(POSITION_TYPE_SELL);
break;
}
if (MQLInfoInteger(MQL_TESTER) && today[0] > date_datetime[ArrayMaximum(date_datetime)])
{
Print("we've run out of the testing data, Tester will be cancelled");
ExpertRemove();
}
}
//--- Time to trade
MqlTick tick;
SymbolInfoTick(Symbol(),tick);
double ask = tick.ask , bid = tick.bid;
//---
if (trend_signal == 1 && PositionCounter(POSITION_TYPE_BUY)<1)
{
m_trade.Buy(Lots,Symbol(),ask,0,0," Buy trade ");
ClosePosByType(POSITION_TYPE_SELL); //if the model predicts a bullish market close all sell trades if available
}
if (trend_signal == 0 && PositionCounter(POSITION_TYPE_SELL)<1)
{
m_trade.Sell(Lots,Symbol(),bid,0,0,"Sell trade");
ClosePosByType(POSITION_TYPE_BUY); //vice versa if the model predicts bear market
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void ConvertTimeToStandard()
{
// A one-time conversion of the CSV date strings from mm/dd/yyyy into yyyy.mm.dd datetime values
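// e.g. the CSV string "05/26/2022" becomes "2022.05.26", which StringToTime()
// below converts into an actual datetime value (the date shown is illustrative only)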
ArrayResize(date_datetime,ArraySize(dates));
for (int i=0; i<ArraySize(dates); i++)
{
StringReplace(dates[i],"/","."); //replace comma with period in each and every date
//Print(dates[i]);
string mm_dd_yy[];
ushort sep = StringGetCharacter(".",0);
StringSplit(dates[i],sep,mm_dd_yy); //separate month, day and year
//Print("mm dd yy date format");
//ArrayPrint(mm_dd_yy);
string year = mm_dd_yy[2];
string day = mm_dd_yy[1];
string month = mm_dd_yy[0];
dates[i] = year+"."+month+"."+day; //store to a yy.mm.dd format
date_datetime[i] = StringToTime(dates[i]); //lastly convert the string datetime to an actual date and time
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
static int BARS;
bool isNewBar()
{
if(BARS!=Bars(_Symbol,_Period))
{
BARS=Bars(_Symbol,_Period);
return(true);
}
return(false);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void ClosePosByType(ENUM_POSITION_TYPE type)
{
for (int i=PositionsTotal()-1; i>=0; i--)
if (m_position.SelectByIndex(i))
if (m_position.Magic() == MAGIC_NUMBER && m_position.Symbol()==Symbol() && m_position.PositionType()==type)
{
m_trade.PositionClose(m_position.Ticket());
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int PositionCounter(ENUM_POSITION_TYPE type)
{
int counter=0;
for (int i=PositionsTotal()-1; i>=0; i--)
if (m_position.SelectByIndex(i))
if (m_position.Magic() == MAGIC_NUMBER && m_position.Symbol()==Symbol() && m_position.PositionType()==type)
{
counter++;
}
return(counter);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void GetColumnDatatoArray(int from_column_number, int &toArr[])
{
//---
int counter=0;
int column = 0, rows=0;
int m_handle = FileOpen(file_name,FILE_READ|FILE_CSV|FILE_ANSI,delimiter,CP_UTF8);
if (m_handle == INVALID_HANDLE)
Print(__FUNCTION__," Invalid csv handle err=",GetLastError());
//---
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==from_column_number)
{
if (rows>=1) //Skip the first row, which contains the column headers
{
counter++;
ArrayResize(toArr,counter);
toArr[counter-1]=(int)data;
}
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void GetColumnDatatoArray(int from_column_number, string &toArr[])
{
//---
int counter=0;
int column = 0, rows=0;
int m_handle = FileOpen(file_name,FILE_READ|FILE_CSV|FILE_ANSI,delimiter);
if (m_handle == INVALID_HANDLE)
Print(__FUNCTION__," Invalid csv handle err=",GetLastError());
//---
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==from_column_number)
{
if (rows>=1) //Skip the first row, which contains the column headers
{
counter++;
ArrayResize(toArr,counter);
toArr[counter-1]=data;
}
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
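//+------------------------------------------------------------------+
//| Note (illustration only): the two readers above assume that      |
//| "Predicted Apple Dataset.csv" stores the predicted trend as 0/1  |
//| integers in column 1 and the date as mm/dd/yyyy text in column 2,|
//| with a header row that is skipped. A made-up example layout:     |
//|                                                                  |
//|   Trend,Date                                                     |
//|   1,05/26/2022                                                   |
//|   0,05/27/2022                                                   |
//+------------------------------------------------------------------+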
| LogisticRegression-MQL5-and-python/LogisticRegression mql5/RealTimeStock Testing.mq5/0 | {
"file_path": "LogisticRegression-MQL5-and-python/LogisticRegression mql5/RealTimeStock Testing.mq5",
"repo_id": "LogisticRegression-MQL5-and-python",
"token_count": 4696
} | 43 |
//+------------------------------------------------------------------+
//| helpers.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| BaseDimRed class for dimension reduction, containing most useful |
//| that are necessary for the algorithms in this folder |
//+------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
class BaseDimRed
{
public:
BaseDimRed(void);
~BaseDimRed(void);
static matrix Slice(const matrix &mat, uint from_0_to, int axis=0);
static vector Slice(const vector &v, uint from_0_to);
static matrix subtract(const matrix&mat, const vector &v);
static void ReplaceNaN(matrix &mat);
static matrix Sort(matrix &mat, vector &args);
static vector Sort(vector &v, vector &args);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix BaseDimRed::Slice(const matrix &mat, uint from_0_to, int axis=0)
{
matrix ret = {};
if (from_0_to==0)
{
printf("%s Cannot slice a vector from index 0 to 0",__FUNCTION__);
return mat;
}
switch(axis)
{
case 0:
ret.Resize(from_0_to, mat.Cols());
for (uint i=0; i<from_0_to; i++) //copy only the first from_0_to rows
ret.Row(mat.Row(i), i);
break;
case 1:
ret.Resize(mat.Rows(), from_0_to);
for (uint i=0; i<from_0_to; i++) //copy only the first from_0_to columns
ret.Col(mat.Col(i), i);
break;
default:
Print("%s Invalid axis %d axis can be either 0 or 1",__FUNCTION__,axis);
break;
}
return ret;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector BaseDimRed::Slice(const vector &v, uint from_0_to)
{
vector ret(from_0_to);
if (from_0_to==0)
{
printf("%s Cannot slice a vector from index 0 to 0",__FUNCTION__);
return v;
}
for (uint i=0; i<ret.Size(); i++)
ret[i] = v[i];
return ret;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix BaseDimRed::subtract(const matrix&mat, const vector &v)
{
matrix ret = mat;
if (mat.Rows()!=v.Size() && mat.Cols()!=v.Size())
{
printf("%s Dimensions Mismatch",__FUNCTION__);
matrix empty = {};
return empty;
}
bool isrows = v.Size()==mat.Rows() ? true : false;
for (ulong i=0; i<(isrows?mat.Cols():mat.Rows()); i++)
{
if (isrows)
ret.Col(mat.Col(i)-v, i);
else
ret.Row(mat.Row(i)-v, i);
}
return ret;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void BaseDimRed::ReplaceNaN(matrix &mat)
{
for (ulong i = 0; i < mat.Rows(); i++)
for (ulong j = 0; j < mat.Cols(); j++)
if (!MathIsValidNumber(mat[i][j]))
mat[i][j] = 0.0;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix BaseDimRed::Sort(matrix &mat, vector &args)
{
matrix m = mat;
if (args.Size() != mat.Cols())
{
printf("%s Args size != mat.Cols ");
return m;
}
for (ulong i=0; i<mat.Cols(); i++)
m.Col(mat.Col((ulong)args[i]), i);
return m;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector BaseDimRed::Sort(vector &v, vector &args)
{
vector vec = v;
if (args.Size() != v.Size())
{
printf("%s Args size != v.size ");
return vec;
}
for (ulong i=0; i<v.Size(); i++)
vec[i] = v[(int)args[i]];
return vec;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
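//+------------------------------------------------------------------+
//| Example (hedged sketch): one possible way to call the helpers    |
//| above. The matrix values and the choice to keep two rows/columns |
//| are made up purely for illustration.                             |
//+------------------------------------------------------------------+
void BaseDimRedExample(void)
  {
   matrix data = {{1,2,3},
                  {4,5,6},
                  {7,8,9}};
   matrix first_two_cols = BaseDimRed::Slice(data, 2, 1); //keep columns 0 and 1
   matrix first_two_rows = BaseDimRed::Slice(data, 2, 0); //keep rows 0 and 1
   BaseDimRed::ReplaceNaN(first_two_cols);                //replace any NaN with 0.0
   Print("first two columns\n", first_two_cols, "\nfirst two rows\n", first_two_rows);
  }
//+------------------------------------------------------------------+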
| MALE5/Dimensionality Reduction/base.mqh/0 | {
"file_path": "MALE5/Dimensionality Reduction/base.mqh",
"repo_id": "MALE5",
"token_count": 2323
} | 44 |
//+------------------------------------------------------------------+
//| Pattern Nets.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| Neural network type for pattern recognition/ can be used to |
//| to predict discrete data target variables. They are widely known |
//| as classification Neural Networks |
//+------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
#include <MALE5\preprocessing.mqh>
#ifndef RANDOM_STATE
#define RANDOM_STATE 42
#endif
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
enum activation
{
AF_HARD_SIGMOID_ = AF_HARD_SIGMOID,
AF_SIGMOID_ = AF_SIGMOID,
AF_SWISH_ = AF_SWISH,
AF_SOFTSIGN_ = AF_SOFTSIGN,
AF_TANH_ = AF_TANH
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CPatternNets
{
private:
vector W_CONFIG;
vector W; //Weights vector
vector B; //Bias vector
activation A_FX;
protected:
ulong inputs;
ulong outputs;
ulong rows;
vector HL_CONFIG;
bool SoftMaxLayer;
vector classes;
void SoftMaxLayerFX(matrix<double> &mat);
public:
CPatternNets(matrix &xmatrix, vector &yvector,vector &HL_NODES, activation ActivationFx, bool SoftMaxLyr=false);
~CPatternNets(void);
int PatternNetFF(vector &in_vector);
vector PatternNetFF(matrix &xmatrix);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPatternNets::CPatternNets(matrix &xmatrix, vector &yvector,vector &HL_NODES, activation ActivationFx, bool SoftMaxLyr=false)
{
A_FX = ActivationFx;
inputs = xmatrix.Cols();
rows = xmatrix.Rows();
SoftMaxLayer = SoftMaxLyr;
//--- Validate the input dimensions
if (rows != yvector.Size())
{
Print(__FUNCTION__," FATAL | Number of rows in the x matrix is not the same the y vector size ");
return;
}
classes = MatrixExtend::Unique(yvector);
outputs = classes.Size();
HL_CONFIG.Copy(HL_NODES);
HL_CONFIG.Resize(HL_CONFIG.Size()+1); //Add the output layer
HL_CONFIG[HL_CONFIG.Size()-1] = (int)outputs; //Set the output layer to one node per class
//---
W_CONFIG.Resize(HL_CONFIG.Size());
B.Resize((ulong)HL_CONFIG.Sum());
//--- GENERATE WEIGHTS
ulong layer_input = inputs;
for (ulong i=0; i<HL_CONFIG.Size(); i++)
{
W_CONFIG[i] = layer_input*HL_CONFIG[i];
layer_input = (ulong)HL_CONFIG[i];
}
W.Resize((ulong)W_CONFIG.Sum());
W = MatrixExtend::Random(0.0, 1.0, (int)W.Size(),RANDOM_STATE); //Gen weights
B = MatrixExtend::Random(0.0,1.0,(int)B.Size(),RANDOM_STATE); //Gen bias
//---
#ifdef DEBUG_MODE
Comment(
"< - - - P A T T E R N N E T S - - - >\n",
"HIDDEN LAYERS + OUTPUT ",HL_CONFIG,"\n",
"INPUTS ",inputs," | OUTPUTS ",outputs," W CONFIG ",W_CONFIG,"\n",
"activation ",EnumToString(A_FX)," SoftMaxLayer = ",bool(SoftMaxLayer)
);
Print("WEIGHTS ",W,"\nBIAS ",B);
#endif
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPatternNets::~CPatternNets(void)
{
ZeroMemory(W);
ZeroMemory(B);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CPatternNets::PatternNetFF(vector &in_vector)
{
matrix L_INPUT = {}, L_OUTPUT={}, L_WEIGHTS = {};
vector v_weights ={};
ulong w_start = 0;
L_INPUT = MatrixExtend::VectorToMatrix(in_vector);
vector L_BIAS_VECTOR = {};
matrix L_BIAS_MATRIX = {};
ulong b_start = 0;
for (ulong i=0; i<W_CONFIG.Size(); i++)
{
MatrixExtend::Copy(W,v_weights,w_start,ulong(W_CONFIG[i]));
L_WEIGHTS = MatrixExtend::VectorToMatrix(v_weights,L_INPUT.Rows());
MatrixExtend::Copy(B,L_BIAS_VECTOR,b_start,(ulong)HL_CONFIG[i]);
L_BIAS_MATRIX = MatrixExtend::VectorToMatrix(L_BIAS_VECTOR);
#ifdef DEBUG_MODE
Print("--> ",i);
Print("L_WEIGHTS\n",L_WEIGHTS,"\nL_INPUT\n",L_INPUT,"\nL_BIAS\n",L_BIAS_MATRIX);
#endif
L_OUTPUT = L_WEIGHTS.MatMul(L_INPUT);
L_OUTPUT = L_OUTPUT+L_BIAS_MATRIX; //Add bias
//---
if (i==W_CONFIG.Size()-1) //Last layer
{
if (SoftMaxLayer)
{
Print("Before softmax\n",L_OUTPUT);
SoftMaxLayerFX(L_OUTPUT);
Print("After\n",L_OUTPUT);
}
else
L_OUTPUT.Activation(L_OUTPUT, ENUM_ACTIVATION_FUNCTION(A_FX));
}
else
L_OUTPUT.Activation(L_OUTPUT, ENUM_ACTIVATION_FUNCTION(A_FX));
//---
L_INPUT.Copy(L_OUTPUT); //Assign outputs to the inputs
w_start += (ulong)W_CONFIG[i]; //advance to the next layer's weights
b_start += (ulong)HL_CONFIG[i];
}
#ifdef DEBUG_MODE
Print("--> outputs\n ",L_OUTPUT);
#endif
vector v_out = MatrixExtend::MatrixToVector(L_OUTPUT);
return((int)classes[v_out.ArgMax()]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CPatternNets::SoftMaxLayerFX(matrix<double> &mat)
{
vector<double> ret = MatrixExtend::MatrixToVector(mat);
ret.Activation(ret, AF_SOFTMAX);
mat = MatrixExtend::VectorToMatrix(ret, mat.Cols());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CPatternNets::PatternNetFF(matrix &xmatrix)
{
vector v(xmatrix.Rows());
for (ulong i=0; i<xmatrix.Rows(); i++)
v[i] = PatternNetFF(xmatrix.Row(i));
return (v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
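//+------------------------------------------------------------------+
//| Example (hedged sketch): constructing the network and running a  |
//| forward pass. The dataset values, the single hidden layer of 4   |
//| nodes and the 0/1 labels below are made up purely to illustrate  |
//| the call sequence.                                               |
//+------------------------------------------------------------------+
void PatternNetsExample(void)
  {
   matrix x = {{0.1, 0.2},
               {0.4, 0.5},
               {0.7, 0.8},
               {0.9, 0.1}};
   vector y = {0, 1, 1, 0};          //two classes -> two output nodes
   vector hidden_nodes = {4};        //one hidden layer with 4 nodes
   CPatternNets *nets = new CPatternNets(x, y, hidden_nodes, AF_SIGMOID_, true); //true = softmax output layer
   vector predictions = nets.PatternNetFF(x); //classify every row of the matrix
   vector row = x.Row(0);
   int row_class = nets.PatternNetFF(row);    //classify a single input vector
   Print("predictions ", predictions, " first row class ", row_class);
   delete nets;
  }
//+------------------------------------------------------------------+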
| MALE5/Neural Networks/Pattern Nets.mqh/0 | {
"file_path": "MALE5/Neural Networks/Pattern Nets.mqh",
"repo_id": "MALE5",
"token_count": 3603
} | 45 |
//+------------------------------------------------------------------+
//| NeuralNets.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
#import "The_Matrix.ex5" //source code here >>> https://www.mql5.com/en/market/product/81533
void MatrixMultiply(double &A[],double &B[],double &AxBMatrix[], int colsA,int rowsB,int &new_rows,int &new_cols);
void CSVToMatrix(double &Matrix[],int &mat_rows,int &mat_cols,string csv_file,string sep=",");
void MatrixPrint(double &Matrix[],int cols,int digits=5);
#import
bool m_debug = true;
//+------------------------------------------------------------------+
enum fx
{
RELU,
SIGMOID,
TANH
} A_fx, last_AFx;
//+------------------------------------------------------------------+
class CNeuralNets
{
private:
void setmodelParams(double &w[],double &b[]);
bool WriteBin(double &w[],double &b[]); //store the model to binary files
protected:
double e;
bool use_softmax;
int m_inputs;
int m_hiddenLayers;
int m_outputLayers;
int m_hiddenLayerNodes[];
double Sigmoid(double z);
double tanh(double z);
double Relu(double z);
void SoftMax(double &Nodes[]);
double ActivationFx(double Q);
double MathRandom(double mini, double maxi);
public:
CNeuralNets(fx HActivationFx,fx OActivationFx,int inputs,int &NodesHL[],int outputs=NULL, bool SoftMax=false);
~CNeuralNets(void);
void train_feedforwardMLP(double &XMatrix[],int epochs=1);
void FeedForwardMLP(double &MLPInputs[],double &MLPOutput[],double &Weights[],double &bias[]);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuralNets::CNeuralNets(fx HActivationFx,fx OActivationFx,int inputs,int &NodesHL[],int outputs=NULL, bool SoftMax=false)
{
e = 2.718281828;
use_softmax = SoftMax;
A_fx = HActivationFx;
last_AFx = OActivationFx;
m_inputs = inputs;
m_hiddenLayers = ArraySize(NodesHL);
ArrayCopy(m_hiddenLayerNodes,NodesHL);
m_outputLayers = outputs;
if (m_debug) printf("CNeural Nets Initialized Hidden Layer Activation = %s Output Activation %s UseSoftMax = %s",EnumToString(HActivationFx),EnumToString(OActivationFx),SoftMax?"Yes":"No");
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuralNets::~CNeuralNets(void)
{
ArrayFree(m_hiddenLayerNodes);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuralNets::Sigmoid(double z)
{
return(1.0/(1.0+MathPow(e,-z)));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuralNets::tanh(double z)
{
return((MathPow(e,z) - MathPow(e,-z))/(MathPow(e,z) + MathPow(e,-z)));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuralNets::Relu(double z)
{
if (z < 0) return(0);
else return(z);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNeuralNets::SoftMax(double &Nodes[])
{
double TempArr[];
ArrayCopy(TempArr,Nodes);
double proba = 0, sum=0;
for (int j=0; j<ArraySize(TempArr); j++) sum += MathPow(e,TempArr[j]);
for (int i=0; i<ArraySize(TempArr); i++)
{
proba = MathPow(e,TempArr[i])/sum;
Nodes[i] = proba;
}
ArrayFree(TempArr);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuralNets::ActivationFx(double Q)
{
double out = 0;
switch(A_fx)
{
case SIGMOID:
out = Sigmoid(Q);
break;
case TANH:
out = (tanh(Q));
break;
case RELU:
out = (Relu(Q));
break;
default:
Print("Unknown Activation Function");
break;
}
return(out);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNeuralNets::setmodelParams(double &w[],double &b[])
{
m_hiddenLayers = m_hiddenLayers+1;
ArrayResize(m_hiddenLayerNodes,m_hiddenLayers);
m_hiddenLayerNodes[m_hiddenLayers-1] = m_outputLayers;
ArrayResize(b,m_hiddenLayers);
//Generate random bias
for(int i=0; i<m_hiddenLayers; i++) b[i] = MathRandom(0,1);
if (m_debug)
{
Print("biases");
ArrayPrint(b,4);
}
//generate weights
int sum_weights=0, L_inputs=m_inputs;
for (int i=0; i<m_hiddenLayers; i++)
{
sum_weights += L_inputs * m_hiddenLayerNodes[i];
ArrayResize(w,sum_weights);
L_inputs = m_hiddenLayerNodes[i];
}
for (int j=0; j<sum_weights; j++) w[j] = MathRandom(0,1);
if (m_debug)
{
Print(" weights ",ArraySize(w));
ArrayPrint(w,3);
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNeuralNets::FeedForwardMLP(
double &MLPInputs[],
double &MLPOutput[],
double &Weights[],
double &bias[])
{
if (m_outputLayers == NULL)
{
if (A_fx == RELU) m_outputLayers = 1;
else m_outputLayers = ArraySize(MLPInputs);
}
if (ArraySize(Weights) == 0 || ArraySize(bias) == 0) Print("The model weights or biases are empty, train the model first before using it");
//---
if (m_debug)
{
Print("Hidden layer nodes plus the output");
ArrayPrint(m_hiddenLayerNodes);
}
int HLnodes = ArraySize(MLPInputs);
int weight_start = 0;
//---
double L_weights[];
int inputs=ArraySize(MLPInputs);
int w_size = 0; //size of weights
int cols = inputs, rows=1;
double IxWMatrix[]; //dot product matrix
int start = 0; //starting to copy weights
for (int i=0; i<m_hiddenLayers; i++)
{
w_size = (inputs*m_hiddenLayerNodes[i]);
ArrayResize(L_weights,w_size);
ArrayCopy(L_weights,Weights,0,start,w_size);
start += w_size;
if (m_debug) {
printf("Hidden Layer %d | Nodes %d | Bias %.4f",i+1,m_hiddenLayerNodes[i],bias[i]);
printf("Inputs %d Weights %d",ArraySize(MLPInputs),ArraySize(L_weights));
ArrayPrint(MLPInputs,5); ArrayPrint(L_weights,3);
}
MatrixMultiply(MLPInputs,L_weights,IxWMatrix,cols,cols,rows,cols);
//printf("before rows %d cols %d",rows,cols);
if (m_debug) { Print("\nIxWMatrix"); MatrixPrint(IxWMatrix,cols); }
ArrayFree(MLPInputs); ArrayResize(MLPInputs,m_hiddenLayerNodes[i]);
inputs = ArraySize(MLPInputs);
if (i == m_hiddenLayers-1) A_fx = last_AFx; //last layer Activation Function
if (m_debug) Print("Activation Function ",EnumToString(A_fx));
for(int k=0; k<ArraySize(IxWMatrix); k++) MLPInputs[k] = ActivationFx(IxWMatrix[k]+bias[i]);
if (m_debug)
{
Print("AFter Activation Function new inputs");
MatrixPrint(MLPInputs,cols);
}
}
if (use_softmax) SoftMax(MLPInputs);
ArrayCopy(MLPOutput,MLPInputs);
if (m_debug) { Print("MLP Final Output"); ArrayPrint(MLPOutput,3); }
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuralNets:: MathRandom(double mini, double maxi)
{
double f = (MathRand() / 32768.0);
return mini + (f * (maxi - mini));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNeuralNets::train_feedforwardMLP(double &XMatrix[],int epochs=1)
{
double MLPInputs[]; ArrayResize(MLPInputs,m_inputs);
double MLPOutputs[]; ArrayResize(MLPOutputs,m_outputLayers);
double Weights[], bias[];
setmodelParams(Weights,bias); //Generating random weights and bias
for (int i=0; i<epochs; i++)
{
int start = 0;
int rows = ArraySize(XMatrix)/m_inputs;
for (int j=0; j<rows; j++) //iterate the entire dataset in a single epoch
{
if (m_debug) printf("<<<< %d >>>",j+1);
ArrayCopy(MLPInputs,XMatrix,0,start,m_inputs);
FeedForwardMLP(MLPInputs,MLPOutputs,Weights,bias);
start+=m_inputs;
}
}
WriteBin(Weights,bias);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuralNets::WriteBin(double &w[], double &b[])
{
string file_name_w = NULL, file_name_b= NULL;
int handle_w, handle_b;
file_name_w = MQLInfoString(MQL_PROGRAM_NAME)+"\\"+"model_w.bin";
file_name_b = MQLInfoString(MQL_PROGRAM_NAME)+"\\"+"model_b.bin";
FileDelete(file_name_w); FileDelete(file_name_b);
handle_w = FileOpen(file_name_w,FILE_WRITE|FILE_BIN);
if (handle_w == INVALID_HANDLE) { printf("Invalid %s Handle err %d",file_name_w,GetLastError()); }
else FileWriteArray(handle_w,w);
FileClose(handle_w);
handle_b = FileOpen(file_name_b,FILE_WRITE|FILE_BIN);
if (handle_b == INVALID_HANDLE) { printf("Invalid %s Handle err %d",file_name_b,GetLastError()); }
else FileWriteArray(handle_b,b);
FileClose(handle_b);
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
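//+------------------------------------------------------------------+
//| Example (hedged sketch): one possible way to drive the class     |
//| above. The 3-input rows, the hidden layers of [5,2] nodes and    |
//| the single epoch are made-up values, shown only to illustrate    |
//| that XMatrix is a flattened, row-major array of inputs.          |
//+------------------------------------------------------------------+
void NeuralNetsExample(void)
  {
   int hidden_nodes[] = {5, 2};          //two hidden layers
   double XMatrix[] = {0.1, 0.2, 0.3,    //row 1 of 3 inputs
                       0.4, 0.5, 0.6};   //row 2 of 3 inputs
   CNeuralNets *nn = new CNeuralNets(RELU, SIGMOID, 3, hidden_nodes, 1); //3 inputs, 1 output node
   nn.train_feedforwardMLP(XMatrix, 1);  //one pass over the data, weights/bias saved to .bin files
   delete nn;
  }
//+------------------------------------------------------------------+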
| NeuralNetworks-MQL5/NeuralNets.mqh/0 | {
"file_path": "NeuralNetworks-MQL5/NeuralNets.mqh",
"repo_id": "NeuralNetworks-MQL5",
"token_count": 6129
} | 46 |
//+------------------------------------------------------------------+
//| ONNX mt5.mq5 |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
#include <Trade\Trade.mqh>
CTrade m_trade;
#include <Trade\PositionInfo.mqh>
CPositionInfo m_position;
#resource "\\Files\\ONNX Models\\MLP.REG.CLOSE.10000.onnx" as uchar RNNModel[]
#define UNDEFINED_REPLACE 1
#include <MALE5\preprocessing.mqh> //https://github.com/MegaJoctan/MALE5/blob/master/preprocessing.mqh
CPreprocessing<vectorf, matrixf> *norm_x;
input string info = "The inputs values should match the collect data script";
input uint start_bar = 100;
input uint total_bars = 10000;
input group "TRADE PARAMS";
input uint MAGIC_NUMBER = 7102023;
input uint SLIPPAGE = 100;
input uint STOPLOSS = 200;
input norm_technique NORM = NORM_STANDARDIZATION;
string csv_name_ = "";
string normparams_folder = "ONNX Normparams\\";
uint batch_size =1;
long mlp_onnxhandle;
int inputs[], outputs[];
vectorf OPEN,
HIGH,
LOW;
matrixf input_data = {};
vectorf output_data(1); //It is very crucial to resize this vector
MqlTick ticks;
//+------------------------------------------------------------------+
//| Expert initialization function |
//+------------------------------------------------------------------+
int OnInit()
{
//---
//normparams_folder = (MQLInfoInteger(MQL_TESTER) || MQLInfoInteger(MQL_OPTIMIZATION)) ? "" : normparams_folder;
if (!LoadNormParams()) //Load the normalization parameters saved once
{
Print("Normalization parameters csv files couldn't be found \nEnsure you are collecting data and Normalizing them using [ONNX get data.ex5] Script \nTrain the Python model again if necessary");
return INIT_FAILED;
}
//--- ONNX SETTINGS
mlp_onnxhandle = OnnxCreateFromBuffer(RNNModel, MQLInfoInteger(MQL_DEBUG) ? ONNX_DEBUG_LOGS : ONNX_DEFAULT); //create the ONNX handle from the buffer | enable debug logs when running in debug mode
if (mlp_onnxhandle == INVALID_HANDLE)
{
Print("OnnxCreateFromBuffer Error = ",GetLastError());
return INIT_FAILED;
}
//--- since not all sizes defined in the input tensor we must set them explicitly
//--- first index - batch size, second index - series size, third index - number of series (only Close)
OnnxTypeInfo type_info; //Get the ONNX model information for reference, in case you forget what the loaded model expects
long input_count=OnnxGetInputCount(mlp_onnxhandle);
Print("model has ",input_count," input(s)");
for(long i=0; i<input_count; i++)
{
string input_name=OnnxGetInputName(mlp_onnxhandle,i);
Print(i," input name is ",input_name);
if(OnnxGetInputTypeInfo(mlp_onnxhandle,i,type_info))
{
PrintTypeInfo(i,"input",type_info);
ArrayCopy(inputs, type_info.tensor.dimensions);
}
}
long output_count=OnnxGetOutputCount(mlp_onnxhandle);
Print("model has ",output_count," output(s)");
for(long i=0; i<output_count; i++)
{
string output_name=OnnxGetOutputName(mlp_onnxhandle,i);
Print(i," output name is ",output_name);
if(OnnxGetOutputTypeInfo(mlp_onnxhandle,i,type_info))
{
PrintTypeInfo(i,"output",type_info);
ArrayCopy(outputs, type_info.tensor.dimensions);
}
}
//---
if (MQLInfoInteger(MQL_DEBUG))
{
Print("Inputs & Outputs");
ArrayPrint(inputs);
ArrayPrint(outputs);
}
const long input_shape[] = {batch_size, 3};
if (!OnnxSetInputShape(mlp_onnxhandle, 0, input_shape)) //Giving the Onnx handle the input shape
{
printf("Failed to set the input shape Err=%d",GetLastError());
return INIT_FAILED;
}
const long output_shape[] = {batch_size, 1};
if (!OnnxSetOutputShape(mlp_onnxhandle, 0, output_shape)) //giving the onnx handle the output shape
{
printf("Failed to set the input shape Err=%d",GetLastError());
return INIT_FAILED;
}
//--- Trade Libraries init
m_trade.SetExpertMagicNumber(MAGIC_NUMBER);
m_trade.SetDeviationInPoints(SLIPPAGE);
m_trade.SetTypeFillingBySymbol(Symbol());
m_trade.SetMarginMode();
return(INIT_SUCCEEDED);
}
//+------------------------------------------------------------------+
//| Expert deinitialization function |
//+------------------------------------------------------------------+
void OnDeinit(const int reason)
{
//---
while (CheckPointer(norm_x) != POINTER_INVALID)
delete (norm_x);
OnnxRelease(mlp_onnxhandle);
}
//+------------------------------------------------------------------+
//| Expert tick function |
//+------------------------------------------------------------------+
void OnTick()
{
//---
input_data = GetLiveData(0,1);
if (!OnnxRun(mlp_onnxhandle, ONNX_NO_CONVERSION, input_data, output_data))
{
Print("Failed to Get the Predictions Err=",GetLastError());
return;
}
Comment("inputs_data\n",input_data,"\npredictions\n",output_data);
//--- Simple trading
SymbolInfoTick(Symbol(), ticks);
double min_vol = SymbolInfoDouble(Symbol(), SYMBOL_VOLUME_MIN);
int stops_level = (int)SymbolInfoInteger(Symbol(), SYMBOL_TRADE_STOPS_LEVEL);
if (!TradeExists(POSITION_TYPE_BUY) && output_data[0] > ticks.ask+stops_level*Point()) //if there is no open trade of such kind and the predicted price is higher than the current ask
m_trade.Buy(min_vol, Symbol(), ticks.ask, ticks.bid - (stops_level + STOPLOSS)*Point(), output_data[0]);
if (!TradeExists(POSITION_TYPE_SELL) && output_data[0] < ticks.bid-stops_level*Point())
m_trade.Sell(min_vol, Symbol(), ticks.bid, ticks.ask + (stops_level + STOPLOSS)*Point(), output_data[0]);
}
//+------------------------------------------------------------------+
//| This Function Loads the normalization parameters saved inside |
//| csv files and applies them to a new CPreprocessing instance for  |
//| consistent normalization of the inputs, to get accurate          |
//| predictions on out-of-sample data                                |
//+------------------------------------------------------------------+
bool LoadNormParams()
{
vectorf min = {}, max ={}, mean={} , std = {};
csv_name_ = Symbol()+"."+EnumToString(Period())+"."+string(total_bars);
switch(NORM)
{
case NORM_MEAN_NORM:
mean = ReadCsvVector(normparams_folder+csv_name_+".mean_norm_scaler.mean.csv"); //--- Loading the mean
min = ReadCsvVector(normparams_folder+csv_name_+".mean_norm_scaler.min.csv"); //--- Loading the min
max = ReadCsvVector(normparams_folder+csv_name_+".mean_norm_scaler.max.csv"); //--- Loading the max
if (MQLInfoInteger(MQL_DEBUG))
Print(EnumToString(NORM),"\nMean ",mean,"\nMin ",min,"\nMax ",max);
norm_x = new CPreprocessing<vectorf,matrixf>(max, mean, min);
if (mean.Sum()<=0 && min.Sum()<=0 && max.Sum() <=0)
return false;
break;
case NORM_MIN_MAX_SCALER:
min = ReadCsvVector(normparams_folder+csv_name_+".min_max_scaler.min.csv"); //--- Loading the min
max = ReadCsvVector(normparams_folder+csv_name_+".min_max_scaler.max.csv"); //--- Loading the max
if (MQLInfoInteger(MQL_DEBUG))
Print(EnumToString(NORM),"\nMin ",min,"\nMax ",max);
norm_x = new CPreprocessing<vectorf,matrixf>(max, min);
if (min.Sum()<=0 && max.Sum() <=0)
return false;
break;
case NORM_STANDARDIZATION:
mean = ReadCsvVector(normparams_folder+csv_name_+".standardization_scaler.mean.csv"); //--- Loading the mean
std = ReadCsvVector(normparams_folder+csv_name_+".standardization_scaler.std.csv"); //--- Loading the std
if (MQLInfoInteger(MQL_DEBUG))
Print(EnumToString(NORM),"\nMean ",mean,"\nStd ",std);
norm_x = new CPreprocessing<vectorf,matrixf>(mean, std, NORM_STANDARDIZATION);
if (mean.Sum()<=0 && std.Sum() <=0)
return false;
break;
}
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
string csv_headerArr[];
vectorf ReadCsvVector(string file_name, string delimiter=",")
{
matrix mat_ = {};
int rows_total=0;
int handle = FileOpen(file_name,FILE_READ|FILE_CSV|FILE_ANSI,delimiter);
//ResetLastError();
if(handle == INVALID_HANDLE)
{
printf("Invalid %s handle Error %d ",file_name,GetLastError());
Print(GetLastError()==0?" TIP | File Might be in use Somewhere else or in another Directory":"");
FileClose(handle);
}
else
{
int column = 0, rows=0;
while(!FileIsEnding(handle) && !IsStopped())
{
string data = FileReadString(handle);
//---
if(rows ==0)
{
ArrayResize(csv_headerArr,column+1);
csv_headerArr[column] = data;
}
if(rows>0) //Skip the first row, which contains the column headers
mat_[rows-1,column] = (double(data));
column++;
//---
if(FileIsLineEnding(handle))
{
rows++;
mat_.Resize(rows,column);
column = 0;
}
}
rows_total = rows;
FileClose(handle);
}
mat_.Resize(rows_total-1,mat_.Cols());
vectorf vec = {};
vec.Assign(mat_);
return(vec);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void PrintTypeInfo(const long num,const string layer,const OnnxTypeInfo& type_info)
{
Print(" type ",EnumToString(type_info.type));
Print(" data type ",EnumToString(type_info.type));
if(type_info.tensor.dimensions.Size()>0)
{
bool dim_defined=(type_info.tensor.dimensions[0]>0);
string dimensions=IntegerToString(type_info.tensor.dimensions[0]);
for(long n=1; n<type_info.tensor.dimensions.Size(); n++)
{
if(type_info.tensor.dimensions[n]<=0)
dim_defined=false;
dimensions+=", ";
dimensions+=IntegerToString(type_info.tensor.dimensions[n]);
}
Print(" shape [",dimensions,"]");
//--- not all dimensions defined
if(!dim_defined)
PrintFormat(" %I64d %s shape must be defined explicitly before model inference",num,layer);
//--- reduce shape
uint reduced=0;
long dims[];
for(long n=0; n<type_info.tensor.dimensions.Size(); n++)
{
long dimension=type_info.tensor.dimensions[n];
//--- replace undefined dimension
if(dimension<=0)
dimension=UNDEFINED_REPLACE;
//--- 1 can be reduced
if(dimension>1)
{
ArrayResize(dims,reduced+1);
dims[reduced++]=dimension;
}
}
//--- all dimensions assumed 1
if(reduced==0)
{
ArrayResize(dims,1);
dims[reduced++]=1;
}
//--- shape was reduced
if(reduced<type_info.tensor.dimensions.Size())
{
dimensions=IntegerToString(dims[0]);
for(long n=1; n<dims.Size(); n++)
{
dimensions+=", ";
dimensions+=IntegerToString(dims[n]);
}
string sentence="";
if(!dim_defined)
sentence=" if undefined dimension set to "+(string)UNDEFINED_REPLACE;
PrintFormat(" shape of %s data can be reduced to [%s]%s",layer,dimensions,sentence);
}
}
else
PrintFormat("no dimensions defined for %I64d %s",num,layer);
}
//+------------------------------------------------------------------+
//| Copies the most recent Open, High and Low prices into a matrix   |
//| starting at the bar given by `start` and normalizes them with    |
//| the loaded scaler                                                 |
//+------------------------------------------------------------------+
matrixf GetLiveData(uint start, uint total)
{
matrixf return_matrix(total, 3);
OPEN.CopyRates(Symbol(), PERIOD_CURRENT,COPY_RATES_OPEN, start, total);
HIGH.CopyRates(Symbol(), PERIOD_CURRENT,COPY_RATES_HIGH, start, total);
LOW.CopyRates(Symbol(), PERIOD_CURRENT,COPY_RATES_LOW, start, total);
return_matrix.Col(OPEN, 0);
return_matrix.Col(HIGH, 1);
return_matrix.Col(LOW, 2);
if (!norm_x.Normalization(return_matrix))
Print("Failed to Normalize");
return return_matrix;
}
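//--- Usage sketch (illustrative only): the latest bar's features can be
//--- fetched and handed to the ONNX model, e.g.
//---   matrixf x = GetLiveData(0, 1);   // one row: Open, High, Low (normalized)
//--- how x is then passed to the model depends on the inference call used
//--- elsewhere in this EA.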
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool TradeExists(ENUM_POSITION_TYPE type)
{
bool exists = false;
for (int i=PositionsTotal()-1; i>=0; i--)
if (m_position.SelectByIndex(i))
if (m_position.Magic() == MAGIC_NUMBER && m_position.Symbol() == Symbol() && m_position.PositionType() == type)
{
exists = true;
break;
}
return exists;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| ONNX-MQL5/ONNX mt5.mq5/0 | {
"file_path": "ONNX-MQL5/ONNX mt5.mq5",
"repo_id": "ONNX-MQL5",
"token_count": 6206
} | 47 |
## The K-Means Algorithm in MQL5
This branch of the Data-Mining repository implements the widely known clustering algorithm,
K-Means. For more information, read this [Article](https://www.mql5.com/en/articles/11615)

Any contributions and thoughts will be appreciated
[BuyMeACoffee](https://www.buymeacoffee.com/omegajoctan)
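
For a quick intuition of what a K-Means implementation does, below is a minimal, self-contained sketch written with plain MQL5 arrays. The function and parameter names are illustrative only and are not this repository's actual API:

```mql5
//--- Illustrative K-Means sketch: alternate between assigning every point to
//--- its nearest centroid and recomputing each centroid as the mean of its
//--- assigned points. `data` holds n_points rows of length dims, stored flat.
void KMeansSketch(const double &data[], int n_points, int dims, int k,
                  double &centroids[], int &labels[])
  {
   ArrayResize(centroids, k*dims);
   ArrayResize(labels, n_points);
   for(int c=0; c<k; c++)                      // naive init: first k points
      for(int d=0; d<dims; d++)
         centroids[c*dims+d] = data[c*dims+d];
   for(int iter=0; iter<10; iter++)            // fixed number of iterations
     {
      for(int p=0; p<n_points; p++)            // assignment step
        {
         double best = DBL_MAX; int best_c = 0;
         for(int c=0; c<k; c++)
           {
            double dist = 0;
            for(int d=0; d<dims; d++)
               dist += MathPow(data[p*dims+d]-centroids[c*dims+d], 2);
            if(dist < best) { best = dist; best_c = c; }
           }
         labels[p] = best_c;
        }
      for(int c=0; c<k; c++)                   // update step: mean of members
        {
         int count = 0;
         double sum[]; ArrayResize(sum, dims); ArrayInitialize(sum, 0.0);
         for(int p=0; p<n_points; p++)
            if(labels[p]==c)
              {
               count++;
               for(int d=0; d<dims; d++) sum[d] += data[p*dims+d];
              }
         if(count>0)
            for(int d=0; d<dims; d++) centroids[c*dims+d] = sum[d]/count;
        }
     }
  }
```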
| Data-Mining-MQL5/README.md/0 | {
"file_path": "Data-Mining-MQL5/README.md",
"repo_id": "Data-Mining-MQL5",
"token_count": 139
} | 0 |
//+------------------------------------------------------------------+
//| DecisionTreeLib.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CDecisionTree
{
private:
string TargetArr[]; //TargetArr Variable
string m_dataset[]; //all the independent variables + dependent variable
string m_DatasetClasses[]; //class(es) available inside our Independent variable
string DataColumnNames[]; //string of all the columns names
string tree_file;
int _n; //for counting Column names
int m_targetvar_column;
int m_colschosen; //columns chosen from our dataset
int rows_total; //all the rows total combined from dataset
string m_dataColsArray[]; //string of numbers of columns chosen
int single_rowstotal; //single column rows total
bool m_debug;
int m_handle;
string m_delimiter;
string m_filename;
protected:
bool fileopen();
void GetAllDataToArray(string &toArr[]);
void GetColumnDatatoArray(int from_column_number, string &toArr[]);
void MatrixRemoveRow(string &dataArr[], string content_detect, int cols);
void MatrixRemoveColumn(string &dataArr[],int column,int rows);
void MatrixClassify(string &dataArr[], string &Classes[],int cols);
void MatrixPrint(string &Matrix[],int rows,int cols,int digits=0);
void MatrixUnTranspose(string &Matrix[],int torows, int tocolumns);
void GetClasses(string &Array[],string &Classes[],int &ClassNumbers[]);
void GetSamples(string &Array[],string &ArrTarget[], string Class, int &SamplesNumbers[]);
double Entropy(int &SampleNumbers[], int total);
double InformationGain(double parent_entropy, double &EntropyArr[], int &ClassNumbers[], int rows_);
double Proba(int number,double total); //find the probability function
double log2(double value); //find the log of base 2
void DrawTree(string &Classes[],string node, int index);
public:
void Init(int targetvar_column,string x_columns,string filename,string delimiter=",",bool debugmode=true);
void BuildTree();
CDecisionTree(void);
~CDecisionTree(void);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTree::CDecisionTree(void)
{
ArrayResize(m_DatasetClasses,2); //classes in our target variable
tree_file = "decisiontree.txt";
FileDelete(tree_file); //delete this output file if it exists
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTree::~CDecisionTree(void)
{
ArrayFree(TargetArr);
ArrayFree(m_dataset);
ArrayFree(DataColumnNames);
ArrayFree(m_dataColsArray);
ArrayFree(m_DatasetClasses);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::Init(int targetvar_column,string x_columns,string filename,string delimiter=",",bool debugmode = true)
{
m_targetvar_column = targetvar_column;
m_filename = filename;
m_delimiter = delimiter;
m_debug = debugmode;
//---
ushort separator = StringGetCharacter(m_delimiter,0);
StringSplit(x_columns,separator,m_dataColsArray);
ArrayResize(m_dataColsArray,ArraySize(m_dataColsArray)+1); //plus the independent variables column
int dataset_cols = ArraySize(m_dataColsArray);
m_colschosen = dataset_cols;
ArrayResize(DataColumnNames,m_colschosen);
m_dataColsArray[m_colschosen-1] = string(targetvar_column); //Add the target variable to the last column
//---
GetAllDataToArray(m_dataset);
GetColumnDatatoArray(targetvar_column,TargetArr);
if (rows_total % m_colschosen != 0)
Alert("There is variance(s) in your dataset rows, Calculations may fall short");
else
single_rowstotal = rows_total/m_colschosen;
//---
MatrixUnTranspose(m_dataset,m_colschosen,single_rowstotal); //we untranspose the Array to be in Matrix human-readable form
if (m_debug)
{
Print("The Entire Dataset \nEach Columns rows =",single_rowstotal," Columns =",m_colschosen);
ArrayPrint(DataColumnNames);
MatrixPrint(m_dataset,m_colschosen,single_rowstotal); //To print the matrix, pass rows in place of columns and vice versa
PrintFormat("Target variable %s",DataColumnNames[m_colschosen-1]);
MatrixPrint(TargetArr,1,single_rowstotal);
}
//---
/*
printf("%s classes",DataColumnNames[m_colschosen-1]);
ArrayPrint(m_DatasetClasses);
ArrayPrint(ClassNumbers,0," ");
*/
//---
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::GetClasses(string &Array[],string &Classes[],int &ClassNumbers[])
{
string TempArr[], TempArr2[];
ArrayCopy(TempArr,Array);
ArrayCopy(TempArr2,TempArr);
//--- find classified contents
string Content;
int n_classes=0, total_class_contents=0;
for (int i=0; i<ArraySize(TempArr); i++)
{
int counter = 0; //for counting the same contents
for (int j=0; j<ArraySize(TempArr2); j++)
{
if (TempArr[i] == TempArr2[j])
{
counter++;
if (counter==1)
Content = TempArr2[j]; //Get the first element of the class
TempArr2[j] = NULL; //we put null to all the values we have already counted to avoid counting them again
if (counter > ArraySize(TempArr))
break; //Get out of this loop as it might be an infinite one
}
}
//---
if (counter>0) //if new different class has been detected
{
total_class_contents += counter;
n_classes++;
ArrayResize(Classes,n_classes);
ArrayResize(ClassNumbers,n_classes);
ClassNumbers[n_classes-1] = counter;
Classes[n_classes-1] = Content;
//if (m_debug) printf("There are %d %s total_class_contents =%d ",counter,Content,total_class_contents);
}
//---
if ( total_class_contents >= ArraySize(TempArr))
break; //we are done getting the numbers get out of the loop
}
ArrayFree(TempArr);
ArrayFree(TempArr2);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::GetSamples(string &Array[], string &ArrTarget[], string Class, int &SamplesNumbers[])
{
ArrayResize(SamplesNumbers,ArraySize(m_DatasetClasses));
if (ArraySize(Array) != ArraySize(ArrTarget))
Print("There is unequal size of Array and Its target Array in Funtion", __FUNCTION__);
for (int i=0; i<ArraySize(m_DatasetClasses); i++)
{
int counter =0;
for (int j=0; j<ArraySize(Array); j++)
if (Class == Array[j])
{
if (ArrTarget[j] == m_DatasetClasses[i])
counter++;
}
//if (m_debug) Print(Class," counter ",counter," ",m_DatasetClasses[i]);
SamplesNumbers[i] = counter;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::BuildTree(void)
{
int ClassNumbers[];
int max_gain = 0;
double IGArr[];
//double parent_entropy = Entropy(p_ClassNumbers,single_rowstotal);
string p_Classes[]; //parent classes
double P_EntropyArr[]; //Parent Entropy
int p_ClassNumbers[]; //parent/ Target variable class numbers
GetClasses(TargetArr,m_DatasetClasses,p_ClassNumbers);
ArrayResize(P_EntropyArr,1);
P_EntropyArr[0] = Entropy(p_ClassNumbers,single_rowstotal);
//--- temporary disposable arrays for parent node information
string TempP_Classes[];
double TempP_EntropyArr[];
int TempP_ClassNumbers[];
//---
if (m_debug) Print("Default Parent Entropy ",P_EntropyArr[0]);
int cols = m_colschosen;
for (int A =0; A<ArraySize(P_EntropyArr); A++)
{
printf("<<<<<<<< Parent Entropy %.5f A = %d >>>>>>>> ",P_EntropyArr[A],A);
for (int i=0; i<cols-1; i++) //we subtract one to exclude the target variable column
{
int rows = ArraySize(m_dataset)/cols;
string Arr[]; //ArrayFor the current column
string ArrTarg[]; //Array for the current target
ArrayResize(Arr,rows);
ArrayResize(ArrTarg,rows);
printf(" <<<<< C O L U M N %s >>>>> ",DataColumnNames[i]);
int index_target=cols-1;
for (int j=0; j<rows; j++) //get column data and its target column
{
int index = i+j * cols;
//Print("index ",index);
Arr[j] = m_dataset[index];
//printf("ArrTarg[%d] = %s m_dataset[%d] =%s ",j,ArrTarg[j],index_target,m_dataset[index_target]);
ArrTarg[j] = m_dataset[index_target];
//printf("Arr[%d] = %s ArrTarg[%d] = %s ",j,Arr[j],j,ArrTarg[j]);
index_target += cols; //the last index of all the columns
}
//Print("Target Array");
//ArrayPrint(ArrTarg);
//---
string Classes[];
GetClasses(Arr,Classes,ClassNumbers);
//ArrayPrint(Classes);
//ArrayPrint(ClassNumbers);
//--- Finding the Entropy
int SamplesNumbers[];
double EntropyArr[];
ArrayResize(EntropyArr,ArraySize(ClassNumbers));
for (int k=0; k<ArraySize(ClassNumbers); k++)
{
GetSamples(Arr,ArrTarg,Classes[k],SamplesNumbers);
Print(" << ",Classes[k]," >> total > ",ClassNumbers[k]);
ArrayPrint(m_DatasetClasses);
ArrayPrint(SamplesNumbers);
EntropyArr[k] = Entropy(SamplesNumbers,ClassNumbers[k]);
//Print("Arraysize Class numbers ",ArraySize(ClassNumbers));
printf("Entropy of %s = %.5f",Classes[k],Entropy(SamplesNumbers,ClassNumbers[k]));
}
//--- Finding the Information Gain
ArrayResize(IGArr,i+1); //information gains matches the columns number
IGArr[i] = InformationGain(P_EntropyArr[A],EntropyArr,ClassNumbers,rows);
max_gain = ArrayMaximum(IGArr);
if (m_debug)
printf("<<<<<< Column Information Gain %.5f >>>>>> \n",IGArr[i]);
//---
if (i == max_gain)
{
//printf("Max gain found the EntropyArray is i =%d max_gain = %d",i,max_gain);
ArrayCopy(TempP_Classes,Classes);
ArrayCopy(TempP_EntropyArr,EntropyArr);
ArrayCopy(TempP_ClassNumbers,ClassNumbers);
}
//---
ZeroMemory(ClassNumbers);
ZeroMemory(SamplesNumbers);
}
//---- Get the parent Entropy, class and class numbers
ArrayCopy(p_Classes,TempP_Classes);
ArrayCopy(P_EntropyArr,TempP_EntropyArr);
ArrayCopy(p_ClassNumbers,TempP_ClassNumbers);
//---
string Node[1];
Node[0] = DataColumnNames[max_gain];
if (m_debug)
printf("Parent Node will be %s with IG = %.5f",Node[0],IGArr[max_gain]);
if (A == 0)
DrawTree(Node,"parent",A);
DrawTree(p_Classes,"child",A);
if (m_debug)
{
Print("Parent Entropy Array and Class Numbers");
ArrayPrint(p_Classes);
ArrayPrint(P_EntropyArr);
ArrayPrint(p_ClassNumbers);
//Print("New Array");
//MatrixPrint(m_dataset,cols,rows_total);
}
//--- CLASSIFY THE MATRIX
MatrixClassify(m_dataset,p_Classes,cols);
if (m_debug)
{
Print("Classified matrix dataset");
ArrayPrint(DataColumnNames);
MatrixPrint(m_dataset,cols,ArraySize(m_dataset));
}
//--- Search if there is zero entropy in Array
int zero_entropy_index = 0;
bool zero_entropy = false;
for (int e=0; e<ArraySize(P_EntropyArr); e++)
if (P_EntropyArr[e] == 0) { zero_entropy = true; zero_entropy_index=e; break; }
if (zero_entropy) //if there is zero in the Entropy Array
{
MatrixRemoveRow(m_dataset,p_Classes[zero_entropy_index],cols);
rows_total = ArraySize(m_dataset); //New number of total rows from Array
if (m_debug)
{
printf("%s is A LEAF NODE its Rows have been removed from the dataset remaining Dataset is ..",p_Classes[zero_entropy_index]);
ArrayPrint(DataColumnNames);
MatrixPrint(m_dataset,cols,rows_total);
}
//we also remove the entropy from the Array and its information everywhere else from the parent Node that we are going to build next
ArrayRemove(P_EntropyArr,zero_entropy_index,1);
ArrayRemove(p_Classes,zero_entropy_index,1);
ArrayRemove(p_ClassNumbers,zero_entropy_index,1);
}
if (m_debug && zero_entropy_index < ArraySize(p_Classes)) //guard the index in case the leaf class was just removed above
Print("rows total ",rows_total," ",p_Classes[zero_entropy_index]," ",p_ClassNumbers[zero_entropy_index]);
//--- REMOVING THE PARENT/ ROOT NODE FROM OUR DATASET
MatrixRemoveColumn(m_dataset,max_gain,cols);
// After removing the column, assign the new values to these global variables
cols = cols-1; // remove that one column that has been removed
rows_total = rows_total - single_rowstotal; //remove the size of one column rows
// we also remove the column from column names Array
ArrayRemove(DataColumnNames,max_gain,1);
//---
printf("Column %d removed from the Matrix, The remaining dataset is",max_gain+1);
ArrayPrint(DataColumnNames);
MatrixPrint(m_dataset,cols,rows_total);
//---
Print("Final Parent Entropy Array and Class Numbers");
ArrayPrint(p_Classes);
ArrayPrint(P_EntropyArr);
ArrayPrint(p_ClassNumbers);
}
//--- free the memory
ArrayFree(TempP_ClassNumbers);
ArrayFree(TempP_Classes);
ArrayFree(TempP_EntropyArr);
ArrayFree(p_ClassNumbers);
ArrayFree(p_Classes);
ArrayFree(P_EntropyArr);
ArrayFree(ClassNumbers);
ArrayFree(IGArr);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::DrawTree(string &Classes[],string node, int index)
{
int file_handle = FileOpen(tree_file,FILE_WRITE|FILE_READ|FILE_ANSI|FILE_TXT,",");
if (file_handle != INVALID_HANDLE)
{
string left = " ";
string right = " ";
//--- Concatenate String from Array
if (node == "root")
{
string write = "";
for (int i=0; i<ArraySize(Classes); i++)
{
write += Classes[i];
for (int j=0; j<int(single_rowstotal/2.5); j++)
{
left += left;
right = left;
}
write = left + write + right;
}
FileWrite(file_handle,write);
FileWrite(file_handle," "); //put a space
FileWrite(file_handle," "); //put a space
}
//---
else if (node == "child")
{
left = " ";
right = " ";
string write = "";
for (int A = 0; A<index; A++)
for (int i=0; i<ArraySize(Classes); i++)
{
for (int j=0; j<int(single_rowstotal/2.5); j++)
left += left;
StringSetLength(left,uint(MathSqrt(StringLen(left))));
right = left+left;
write += left + Classes[i] + right;
}
//---
if (FileSeek(file_handle,0,SEEK_END))
{
FileWrite(file_handle,write);
FileWrite(file_handle," "); //put a space
FileWrite(file_handle," "); //put a space
FileWrite(file_handle," "); //put a space
FileWrite(file_handle," "); //put a space
}
}
else Print("Unknown Node, Could draw it to ",tree_file);
FileClose(file_handle);
}
else
printf("Could not draw a decision Tree to file %s Invalid File Handle Err %d ",tree_file,GetLastError());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::MatrixClassify(string &dataArr[],string &Classes[], int cols)
{
string ClassifiedArr[];
ArrayResize(ClassifiedArr,ArraySize(dataArr));
int fill_start = 0, fill_ends = 0;
int index = 0;
for (int i = 0; i<ArraySize(Classes); i++)
{
int start = 0; int curr_col = 0;
for (int j = 0; j<ArraySize(dataArr); j++)
{
curr_col++;
if (Classes[i] == dataArr[j])
{
//printf("Classes[%d] = %s dataArr[%d] = %s ",i,Classes[i],j,dataArr[j]);
if (curr_col == 1)
fill_start = j;
else
{
if (j>curr_col)
fill_start = j - (curr_col-1);
else fill_start = (curr_col-1) - j;
fill_start = fill_start;
//Print("j ",j," j-currcol ",j-(curr_col-1)," curr_col ",curr_col," columns ",cols," fill start ",fill_start );
}
fill_ends = fill_start + cols;
//printf("fillstart %d fillends %d j index = %d i = %d ",fill_start,fill_ends,j,i);
//---
//if (ArraySize(ClassifiedArr) >= ArraySize(dataArr)) break;
//Print("ArraySize Classified Arr ",ArraySize(ClassifiedArr)," dataArr size ",ArraySize(dataArr)," i ",i);
for (int k=fill_start; k<fill_ends; k++)
{
index++;
//printf(" k %d index %d",k,index);
//printf("dataArr[%d] = %s index = %d",k,dataArr[k],index-1);
ClassifiedArr[index-1] = dataArr[k];
}
if (index >= ArraySize(dataArr)) break; //might be infinite loop if this occurs
}
if (curr_col == cols) curr_col = 0;
}
if (index >= ArraySize(dataArr)) break; //might be infinite loop if this occurs
}
ArrayCopy(dataArr,ClassifiedArr);
ArrayFree(ClassifiedArr);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::MatrixRemoveRow(string &dataArr[], string content_detect, int cols)
{
int curr_col = 0; //The current column we are at
int fill_start = 0, fill_ends = 0;
for (int i=0; i<ArraySize(dataArr); i++)
{
curr_col++;
if (dataArr[i] == content_detect)
{
if (curr_col == 1)
fill_start = i;
else
{
fill_start = i-curr_col;
}
fill_ends = fill_start + cols;
//printf("fill start %d ends %d",fill_start,fill_ends);
//ArrayPrint(dataArr,0,NULL,fill_start,fill_ends-fill_start);
//Arrayfill string
for (int j=fill_start; j<fill_ends; j++)
dataArr[j] = NULL;
}
//---
if (curr_col == cols) curr_col = 0;
}
//--- Now it's time to remove all the NULL
string NewArr[];
int index=0;
for (int i=0; i<ArraySize(dataArr); i++)
{
if (dataArr[i] != NULL)
{
index++;
ArrayResize(NewArr,index);
NewArr[index-1] = dataArr[i];
}
if (index > ArraySize(dataArr)) //this might be infinite loop
break;
}
ArrayFree(dataArr);
ArrayCopy(dataArr,NewArr);
ArrayFree(NewArr); //dump this Array
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::MatrixRemoveColumn(string &dataArr[],int column,int rows)
{
int index = column;
for (int i=0; i<ArraySize(dataArr); i++)
{
//Print("i = ",i," column ",column);
dataArr[column] = NULL;
column += rows;
if (column >= ArraySize(dataArr))
break; //we're done adding null values
}
//--- After we've just put the null values it's time to remove those null values
for (int i=0; i<ArraySize(dataArr); i++)
if (dataArr[i] == NULL)
{ ArrayRemove(dataArr,i,1); i--; } //remove that specific item and re-check the same index
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTree::Entropy(int &SampleNumbers[],int total)
{
double Entropy = 0;
double entropy_out =0; //the value of entropy that will be returned
for (int i=0; i<ArraySize(SampleNumbers); i++)
{
if (SampleNumbers[i] == 0) { Entropy = 0; break; } //Exception
double probability1 = Proba(SampleNumbers[i],total);
Entropy += probability1 * log2(probability1);
}
if (Entropy==0) entropy_out = 0; //handle the exception
else entropy_out = -Entropy;
return(entropy_out);
}
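//--- Worked example (illustrative): for a node holding 9 samples of one class
//--- and 5 of the other, Entropy({9,5},14) = -(9/14)*log2(9/14)-(5/14)*log2(5/14)
//--- which is approximately 0.940.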
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTree::InformationGain(double parent_entropy, double &EntropyArr[], int &ClassNumbers[], int rows_)
{
double IG = 0;
for (int i=0; i<ArraySize(EntropyArr); i++)
{
double prob = ClassNumbers[i]/double(rows_);
IG += prob * EntropyArr[i];
}
return(parent_entropy - IG);
}
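//--- Worked example (illustrative): with a parent entropy of 0.940 and a column
//--- splitting into groups of 5, 4 and 5 samples whose entropies are 0.971, 0
//--- and 0.971, InformationGain = 0.940 - (5/14*0.971 + 4/14*0 + 5/14*0.971),
//--- which is approximately 0.247.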
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTree::Proba(int number,double total)
{
return(number/total);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTree::log2(double value)
{
return (log10(value)/log10(2));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CDecisionTree::fileopen(void)
{
m_handle = FileOpen(m_filename,FILE_READ|FILE_CSV|FILE_ANSI,m_delimiter);
if (m_handle == INVALID_HANDLE)
{
Print(__FUNCTION__," Invalid csv handle err=",GetLastError());
return(false);
}
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::GetAllDataToArray(string &toArr[])
{
int counter=0;
for (int i=0; i<ArraySize(m_dataColsArray); i++)
{
if (fileopen())
{
int column = 0, rows=0;
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==(int)m_dataColsArray[i])
{
if (rows>=1) //Skip the first row, which contains the column headers
{
counter++;
ArrayResize(toArr,counter); //array size for all the columns
toArr[counter-1]=data;
}
else
DataColumnNames[i]=data;
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
rows_total += rows-1; //since we are avoiding the first row we have to also remove it's number on the list here
//adding a plus equals sign to rows total ensures that we get the total number of rows for the entire dataset
}
FileClose(m_handle);
}
if (m_debug)
Print("All data Array Size ",ArraySize(toArr)," consuming ", sizeof(toArr)," bytes of memory");
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::GetColumnDatatoArray(int from_column_number, string &toArr[])
{
int counter=0;
int column = 0, rows=0;
fileopen();
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==from_column_number)
{
if (rows>=1) //Skip the first row, which contains the column headers
{
counter++;
ArrayResize(toArr,counter);
toArr[counter-1]=data;
}
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::MatrixPrint(string &Matrix[],int rows,int cols,int digits=0)
{
Print("[ ");
int start = 0;
//if (rows>=cols)
for (int i=0; i<cols; i++)
{
ArrayPrint(Matrix,digits,NULL,start,rows);
start += rows;
}
printf("] \ncolumns = %d rows = %d",rows,cols);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTree::MatrixUnTranspose(string &Matrix[],int torows, int tocolumns)
{
int rows, columns;
string Temp_Mat[]; //temporary array
rows = torows;
columns = tocolumns;
//--- UnTransposing Array Starting
ArrayResize(Temp_Mat,ArraySize(Matrix));
int index=0; int start_incr = 0;
for (int C=0; C<columns; C++)
{
start_incr= C; //the columns are the ones responsible for shaping the new array
for (int R=0; R<rows; R++, index++)
{
//if (m_debug)
//Print("Old Array Access key = ",index," New Array Access Key = ",start_incr);
Temp_Mat[index] = Matrix[start_incr];
start_incr += columns;
}
}
ArrayCopy(Matrix,Temp_Mat);
ArrayFree(Temp_Mat);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| DecisionTree-Classification-tree-MQL5/decisiontree_2.mqh/0 | {
"file_path": "DecisionTree-Classification-tree-MQL5/decisiontree_2.mqh",
"repo_id": "DecisionTree-Classification-tree-MQL5",
"token_count": 18219
} | 1 |
# Generated by Django 3.2.3 on 2021-09-14 03:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0002_alter_bookinstance_imprint'),
]
operations = [
migrations.AddField(
model_name='bookinstance',
name='borrower',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='author',
name='first_name',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='author',
name='last_name',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='bookinstance',
name='id',
field=models.UUIDField(default=uuid.uuid4, help_text='Unique ID for this Particular Book across the whole library', primary_key=True, serialize=False),
),
]
| Django-locallibrary/LocalLibrary/catalog/migrations/0003_auto_20210914_0632.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/migrations/0003_auto_20210914_0632.py",
"repo_id": "Django-locallibrary",
"token_count": 545
} | 2 |
"""
Package containing all pip commands
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
# There is currently a bug in python/typeshed mentioned at
# https://github.com/python/typeshed/issues/3906 which causes the
# return type of difflib.get_close_matches to be reported
# as List[Sequence[str]] whereas it should have been List[str]
from __future__ import absolute_import
import importlib
from collections import OrderedDict, namedtuple
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any
from pip._internal.cli.base_command import Command
CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary')
# The ordering matters for help display.
# Also, even though the module path starts with the same
# "pip._internal.commands" prefix in each case, we include the full path
# because it makes testing easier (specifically when modifying commands_dict
# in test setup / teardown by adding info for a FakeCommand class defined
# in a test-related module).
# Finally, we need to pass an iterable of pairs here rather than a dict
# so that the ordering won't be lost when using Python 2.7.
commands_dict = OrderedDict([
('install', CommandInfo(
'pip._internal.commands.install', 'InstallCommand',
'Install packages.',
)),
('download', CommandInfo(
'pip._internal.commands.download', 'DownloadCommand',
'Download packages.',
)),
('uninstall', CommandInfo(
'pip._internal.commands.uninstall', 'UninstallCommand',
'Uninstall packages.',
)),
('freeze', CommandInfo(
'pip._internal.commands.freeze', 'FreezeCommand',
'Output installed packages in requirements format.',
)),
('list', CommandInfo(
'pip._internal.commands.list', 'ListCommand',
'List installed packages.',
)),
('show', CommandInfo(
'pip._internal.commands.show', 'ShowCommand',
'Show information about installed packages.',
)),
('check', CommandInfo(
'pip._internal.commands.check', 'CheckCommand',
'Verify installed packages have compatible dependencies.',
)),
('config', CommandInfo(
'pip._internal.commands.configuration', 'ConfigurationCommand',
'Manage local and global configuration.',
)),
('search', CommandInfo(
'pip._internal.commands.search', 'SearchCommand',
'Search PyPI for packages.',
)),
('cache', CommandInfo(
'pip._internal.commands.cache', 'CacheCommand',
"Inspect and manage pip's wheel cache.",
)),
('wheel', CommandInfo(
'pip._internal.commands.wheel', 'WheelCommand',
'Build wheels from your requirements.',
)),
('hash', CommandInfo(
'pip._internal.commands.hash', 'HashCommand',
'Compute hashes of package archives.',
)),
('completion', CommandInfo(
'pip._internal.commands.completion', 'CompletionCommand',
'A helper command used for command completion.',
)),
('debug', CommandInfo(
'pip._internal.commands.debug', 'DebugCommand',
'Show information useful for debugging.',
)),
('help', CommandInfo(
'pip._internal.commands.help', 'HelpCommand',
'Show help for commands.',
)),
]) # type: OrderedDict[str, CommandInfo]
def create_command(name, **kwargs):
# type: (str, **Any) -> Command
"""
Create an instance of the Command class with the given name.
"""
module_path, class_name, summary = commands_dict[name]
module = importlib.import_module(module_path)
command_class = getattr(module, class_name)
command = command_class(name=name, summary=summary, **kwargs)
return command
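# Example (sketch): create_command("install", isolated=False) imports
# pip._internal.commands.install and returns an InstallCommand instance
# built with name="install" and the summary registered above.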
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
name = name.lower()
close_commands = get_close_matches(name, commands_dict.keys())
if close_commands:
return close_commands[0]
else:
return False
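# Example (sketch): get_similar_commands("instal") returns "install", while a
# name with no close match returns False.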
| Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 1457
} | 3 |
"""Configuration management setup
Some terminology:
- name
As written in config files.
- value
Value associated with a name
- key
Name combined with it's section (section.name)
- variant
A single word describing where the configuration key-value pair came from
"""
import locale
import logging
import os
import sys
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import (
ConfigurationError,
ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS, expanduser
from pip._internal.utils.misc import ensure_dir, enum
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Iterable, List, NewType, Optional, Tuple
)
RawConfigParser = configparser.RawConfigParser # Shorthand
Kind = NewType("Kind", str)
logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
if "." not in name:
error_message = (
"Key does not contain dot separated section and key. "
"Perhaps you wanted to use 'global.{}' instead?"
).format(name)
raise ConfigurationError(error_message)
return name.split(".", 1)
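# Examples (sketch): _normalize_name("--TIMEOUT") -> "timeout";
# _disassemble_key("global.index-url") -> ["global", "index-url"].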
# The kinds of configurations there are.
kinds = enum(
USER="user", # User Specific
GLOBAL="global", # System Wide
SITE="site", # [Virtual] Environment Specific
ENV="env", # from PIP_CONFIG_FILE
ENV_VAR="env-var", # from Environment Variables
)
CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf'
def get_configuration_files():
# type: () -> Dict[Kind, List[str]]
global_config_files = [
os.path.join(path, CONFIG_BASENAME)
for path in appdirs.site_config_dirs('pip')
]
site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
legacy_config_file = os.path.join(
expanduser('~'),
'pip' if WINDOWS else '.pip',
CONFIG_BASENAME,
)
new_config_file = os.path.join(
appdirs.user_config_dir("pip"), CONFIG_BASENAME
)
return {
kinds.GLOBAL: global_config_files,
kinds.SITE: [site_config_file],
kinds.USER: [legacy_config_file, new_config_file],
}
class Configuration(object):
"""Handles management of configuration.
Provides an interface to accessing and managing configuration files.
This class provides an API that takes "section.key-name" style
keys and stores the value associated with it as "key-name" under the
section "section".
This allows for a clean interface wherein both the section and the
key-name are preserved in an easy-to-manage form in the configuration files,
and the data stored is also nice.
"""
def __init__(self, isolated, load_only=None):
# type: (bool, Optional[Kind]) -> None
super(Configuration, self).__init__()
_valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.SITE, None]
if load_only not in _valid_load_only:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, _valid_load_only[:-1]))
)
)
self.isolated = isolated
self.load_only = load_only
# The order here determines the override order.
self._override_order = [
kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
]
self._ignore_env_names = ["version", "help"]
# Because we keep track of where we got the data from
self._parsers = {
variant: [] for variant in self._override_order
} # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
self._config = {
variant: {} for variant in self._override_order
} # type: Dict[Kind, Dict[str, Any]]
self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]]
def load(self):
# type: () -> None
"""Loads configuration from configuration files and environment
"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self):
# type: () -> Optional[str]
"""Returns the file with highest priority in configuration
"""
assert self.load_only is not None, \
"Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self):
# type: () -> Iterable[Tuple[str, Any]]
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key):
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key))
def set_value(self, key, value):
# type: (str, Any) -> None
"""Modify a value in the configuration.
"""
self._ensure_have_load_only()
assert self.load_only
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key):
# type: (str) -> None
"""Unset a value in the configuration."""
self._ensure_have_load_only()
assert self.load_only
if key not in self._config[self.load_only]:
raise ConfigurationError("No such key - {}".format(key))
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
if not (parser.has_section(section)
and parser.remove_option(section, name)):
# The option was not removed.
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
# The section may be empty after the option was removed.
if not parser.items(section):
parser.remove_section(section)
self._mark_as_modified(fname, parser)
del self._config[self.load_only][key]
def save(self):
# type: () -> None
"""Save the current in-memory state.
"""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
with open(fname, "w") as f:
parser.write(f)
#
# Private routines
#
def _ensure_have_load_only(self):
# type: () -> None
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self):
# type: () -> Dict[str, Any]
"""A dictionary representing the loaded configuration.
"""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in self._override_order:
retval.update(self._config[variant])
return retval
def _load_config_files(self):
# type: () -> None
"""Loads configuration from configuration files
"""
config_files = dict(self.iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's specific variant set in `load_only`, load only
# that variant, not the others.
if self.load_only is not None and variant != self.load_only:
logger.debug(
"Skipping file '%s' (variant: %s)", fname, variant
)
continue
parser = self._load_file(variant, fname)
# Keeping track of the parsers used
self._parsers[variant].append((fname, parser))
def _load_file(self, variant, fname):
# type: (Kind, str) -> RawConfigParser
logger.debug("For variant '%s', will try loading '%s'", variant, fname)
parser = self._construct_parser(fname)
for section in parser.sections():
items = parser.items(section)
self._config[variant].update(self._normalized_keys(section, items))
return parser
def _construct_parser(self, fname):
# type: (str) -> RawConfigParser
parser = configparser.RawConfigParser()
# If there is no such file, don't bother reading it but create the
# parser anyway, to hold the data.
# Doing this is useful when modifying and saving files, where we don't
# need to construct a parser.
if os.path.exists(fname):
try:
parser.read(fname)
except UnicodeDecodeError:
# See https://github.com/pypa/pip/issues/4963
raise ConfigurationFileCouldNotBeLoaded(
reason="contains invalid {} characters".format(
locale.getpreferredencoding(False)
),
fname=fname,
)
except configparser.Error as error:
# See https://github.com/pypa/pip/issues/4893
raise ConfigurationFileCouldNotBeLoaded(error=error)
return parser
def _load_environment_vars(self):
# type: () -> None
"""Loads configuration from environment variables
"""
self._config[kinds.ENV_VAR].update(
self._normalized_keys(":env:", self.get_environ_vars())
)
def _normalized_keys(self, section, items):
# type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
def get_environ_vars(self):
# type: () -> Iterable[Tuple[str, str]]
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
should_be_yielded = (
key.startswith("PIP_") and
key[4:].lower() not in self._ignore_env_names
)
if should_be_yielded:
yield key[4:].lower(), val
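# Example (sketch): with PIP_TIMEOUT=60 in the environment, this yields
# ("timeout", "60"); PIP_VERSION and PIP_HELP are skipped via _ignore_env_names.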
# XXX: This is patched in the tests.
def iter_config_files(self):
# type: () -> Iterable[Tuple[Kind, List[str]]]
"""Yields variant and configuration files associated with it.
This should be treated like items of a dictionary.
"""
# SMELL: Move the conditions out of this function
# environment variables have the lowest priority
config_file = os.environ.get('PIP_CONFIG_FILE', None)
if config_file is not None:
yield kinds.ENV, [config_file]
else:
yield kinds.ENV, []
config_files = get_configuration_files()
# at the base we have any global configuration
yield kinds.GLOBAL, config_files[kinds.GLOBAL]
# per-user configuration next
should_load_user_config = not self.isolated and not (
config_file and os.path.exists(config_file)
)
if should_load_user_config:
# The legacy config file is overridden by the new config file
yield kinds.USER, config_files[kinds.USER]
# finally virtualenv configuration first trumping others
yield kinds.SITE, config_files[kinds.SITE]
def get_values_in_config(self, variant):
# type: (Kind) -> Dict[str, Any]
"""Get values present in a config file"""
return self._config[variant]
def _get_parser_to_modify(self):
# type: () -> Tuple[str, RawConfigParser]
# Determine which parser to modify
assert self.load_only
parsers = self._parsers[self.load_only]
if not parsers:
# This should not happen if everything works correctly.
raise ConfigurationError(
"Fatal Internal error [id=2]. Please report as a bug."
)
# Use the highest priority parser.
return parsers[-1]
# XXX: This is patched in the tests.
def _mark_as_modified(self, fname, parser):
# type: (str, RawConfigParser) -> None
file_parser_tuple = (fname, parser)
if file_parser_tuple not in self._modified_parsers:
self._modified_parsers.append(file_parser_tuple)
def __repr__(self):
# type: () -> str
return "{}({!r})".format(self.__class__.__name__, self._dictionary)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/configuration.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/configuration.py",
"repo_id": "Django-locallibrary",
"token_count": 6065
} | 4 |
"""
The main purpose of this module is to expose LinkCollector.collect_links().
"""
import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict
from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import (
Callable, Iterable, List, MutableMapping, Optional,
Protocol, Sequence, Tuple, TypeVar, Union,
)
import xml.etree.ElementTree
from pip._vendor.requests import Response
from pip._internal.network.session import PipSession
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
# Used in the @lru_cache polyfill.
F = TypeVar('F')
class LruCache(Protocol):
def __call__(self, maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
raise NotImplementedError
logger = logging.getLogger(__name__)
# Fallback to noop_lru_cache in Python 2
# TODO: this can be removed when python 2 support is dropped!
def noop_lru_cache(maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
def _wrapper(f):
# type: (F) -> F
return f
return _wrapper
_lru_cache = getattr(functools, "lru_cache", noop_lru_cache) # type: LruCache
def _match_vcs_scheme(url):
# type: (str) -> Optional[str]
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
def _is_url_like_archive(url):
# type: (str) -> bool
"""Return whether the URL looks like an archive.
"""
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
# type: (str, str) -> None
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
# type: (Response) -> None
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
# type: (str, PipSession) -> None
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
raise_for_status(resp)
_ensure_html_header(resp)
def _get_html_response(url, session):
# type: (str, PipSession) -> Response
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
# We don't want to blindly return cached data for
# /simple/, because authors generally expect that
# twine upload && pip install will work, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
raise_for_status(resp)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers):
# type: (ResponseHeaders) -> Optional[str]
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
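# Example (sketch): if the page at https://example.com/simple/foo/ contains
# <base href="https://mirror.example.org/">, anchors are resolved against the
# mirror URL; otherwise they are resolved against the page URL itself.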
def _clean_url_path_part(part):
# type: (str) -> str
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib_parse.quote(urllib_parse.unquote(part))
def _clean_file_url_path(part):
# type: (str) -> str
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib_request.pathname2url(urllib_request.url2pathname(part))
# Split on "@" and on "%2F" (the percent-encoded "/").
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
def _clean_url_path(path, is_local_path):
# type: (str, bool) -> str
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
def _clean_link(url):
# type: (str) -> str
"""
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
"""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib_parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
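# Example (sketch): _clean_link("https://example.com/some path#sha256=abc")
# returns "https://example.com/some%20path#sha256=abc"; only the path portion
# is re-quoted.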
def _create_link_from_element(
anchor, # type: HTMLElement
page_url, # type: str
base_url, # type: str
):
# type: (...) -> Optional[Link]
"""
Convert an anchor element in a simple repository page to a Link.
"""
href = anchor.get("href")
if not href:
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
# This is a unicode string in Python 2 (and 3).
yanked_reason = unescape(yanked_reason)
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent(object):
def __init__(self, page):
# type: (HTMLPage) -> None
assert page.cache_link_parsing
self.page = page
def __eq__(self, other):
# type: (object) -> bool
return (isinstance(other, type(self)) and
self.page.url == other.page.url)
def __hash__(self):
# type: () -> int
return hash(self.page.url)
def with_cached_html_pages(
fn, # type: Callable[[HTMLPage], Iterable[Link]]
):
# type: (...) -> Callable[[HTMLPage], List[Link]]
"""
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
"""
@_lru_cache(maxsize=None)
def wrapper(cacheable_page):
# type: (CacheablePageContent) -> List[Link]
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
# type: (HTMLPage) -> List[Link]
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
@with_cached_html_pages
def parse_links(page):
# type: (HTMLPage) -> Iterable[Link]
"""
Parse an HTML document, and yield its anchor elements as Link objects.
"""
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(
self,
content, # type: bytes
encoding, # type: Optional[str]
url, # type: str
cache_link_parsing=True, # type: bool
):
# type: (...) -> None
"""
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self):
# type: () -> str
return redact_auth_from_url(self.url)
def _handle_get_page_fail(
link, # type: Link
reason, # type: Union[str, Exception]
meth=None # type: Optional[Callable[..., None]]
):
# type: (...) -> None
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response, cache_link_parsing=True):
# type: (Response, bool) -> HTMLPage
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing)
def _get_html_page(link, session=None):
# type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning('Cannot look at %s URL %s because it does not support '
'lookup as web pages.', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.warning(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by a HTTP HEAD request.', link,
)
except _NotHTML as exc:
logger.warning(
'Skipping page %s because the %s request got Content-Type: %s. '
'The only supported Content-Type is text/html',
link, exc.request_desc, exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, "connection error: {}".format(exc))
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp,
cache_link_parsing=link.cache_link_parsing)
return None
def _remove_duplicate_links(links):
# type: (Iterable[Link]) -> List[Link]
"""
Return a list of links, with duplicates removed and ordering preserved.
"""
# We preserve the ordering when removing duplicates because we can.
return list(OrderedDict.fromkeys(links))
def group_locations(locations, expand_dir=False):
# type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
"""
Divide a list of locations into two groups: "files" (archives) and "urls."
:return: A pair of lists (files, urls).
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
# type: (str) -> None
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning(
"Path '%s' is ignored: it is a directory.", path,
)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url,
)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url,
)
return files, urls
class CollectedLinks(object):
"""
Encapsulates the return value of a call to LinkCollector.collect_links().
The return value includes both URLs to project pages containing package
links, as well as individual package Link objects collected from other
sources.
This info is stored separately as:
(1) links from the configured file locations,
(2) links from the configured find_links, and
(3) urls to HTML project pages, as described by the PEP 503 simple
repository API.
"""
def __init__(
self,
files, # type: List[Link]
find_links, # type: List[Link]
project_urls, # type: List[Link]
):
# type: (...) -> None
"""
:param files: Links from file locations.
:param find_links: Links from find_links.
:param project_urls: URLs to HTML project pages, as described by
the PEP 503 simple repository API.
"""
self.files = files
self.find_links = find_links
self.project_urls = project_urls
class LinkCollector(object):
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_links() method.
"""
def __init__(
self,
session, # type: PipSession
search_scope, # type: SearchScope
):
# type: (...) -> None
self.search_scope = search_scope
self.session = session
@classmethod
def create(cls, session, options, suppress_no_index=False):
# type: (PipSession, Values, bool) -> LinkCollector
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
'Ignoring indexes: %s',
','.join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links, index_urls=index_urls,
)
link_collector = LinkCollector(
session=session, search_scope=search_scope,
)
return link_collector
@property
def find_links(self):
# type: () -> List[str]
return self.search_scope.find_links
def fetch_page(self, location):
# type: (Link) -> Optional[HTMLPage]
"""
Fetch an HTML page containing package links.
"""
return _get_html_page(location, session=self.session)
def collect_links(self, project_name):
# type: (str) -> CollectedLinks
"""Find all available links for the given project name.
:return: All the Link objects (unfiltered), as a CollectedLinks object.
"""
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
index_file_loc, index_url_loc = group_locations(index_locations)
fl_file_loc, fl_url_loc = group_locations(
self.find_links, expand_dir=True,
)
file_links = [
Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
]
# We trust every directly linked archive in find_links
find_link_links = [Link(url, '-f') for url in self.find_links]
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links.
# We want to filter out anything that does not have a secure origin.
url_locations = [
link for link in itertools.chain(
# Mark PyPI indices as "cache_link_parsing == False" -- this
# will avoid caching the result of parsing the page for links.
(Link(url, cache_link_parsing=False) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
)
if self.session.is_secure_origin(link)
]
url_locations = _remove_duplicate_links(url_locations)
lines = [
'{} location(s) to search for versions of {}:'.format(
len(url_locations), project_name,
),
]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(
files=file_links,
find_links=find_link_links,
project_urls=url_locations,
)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/index/collector.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/index/collector.py",
"repo_id": "Django-locallibrary",
"token_count": 9409
} | 5 |
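A minimal, hedged sketch of how the collector module above is typically driven. The PipSession and the optparse Values object are assumed to come from pip's CLI plumbing and are not constructed here; only LinkCollector.create, collect_links, and the CollectedLinks attributes are taken from the code shown.

# Hypothetical driver; `session` is assumed to be a configured PipSession and
# `options` an optparse Values carrying index_url/extra_index_urls/find_links.
from pip._internal.index.collector import LinkCollector

def list_candidate_sources(session, options, project_name):
    # Honours --index-url / --extra-index-url / --find-links / --no-index.
    collector = LinkCollector.create(session=session, options=options)
    collected = collector.collect_links(project_name)
    # CollectedLinks keeps file links, find-links entries and project pages apart.
    return {
        "files": [str(link) for link in collected.files],
        "find_links": [str(link) for link in collected.find_links],
        "project_urls": [str(link) for link in collected.project_urls],
    }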
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.utils.models import KeyBasedCompareMixin
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from pip._vendor.packaging.version import _BaseVersion
from pip._internal.models.link import Link
class InstallationCandidate(KeyBasedCompareMixin):
"""Represents a potential "candidate" for installation.
"""
__slots__ = ["name", "version", "link"]
def __init__(self, name, version, link):
# type: (str, str, Link) -> None
self.name = name
self.version = parse_version(version) # type: _BaseVersion
self.link = link
super(InstallationCandidate, self).__init__(
key=(self.name, self.version, self.link),
defining_class=InstallationCandidate
)
def __repr__(self):
# type: () -> str
return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
self.name, self.version, self.link,
)
def __str__(self):
# type: () -> str
return '{!r} candidate (version {} at {})'.format(
self.name, self.version, self.link,
)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/models/candidate.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/models/candidate.py",
"repo_id": "Django-locallibrary",
"token_count": 482
} | 6 |
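A brief illustration of the value-object semantics that KeyBasedCompareMixin gives InstallationCandidate: comparison is driven by the (name, version, link) key, with parse_version supplying PEP 440 ordering. The download URLs below are placeholders invented for the example.

# Illustrative only; the URLs are made up.
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.link import Link

older = InstallationCandidate(
    "sampleproj", "1.0", Link("https://example.com/sampleproj-1.0.tar.gz"))
newer = InstallationCandidate(
    "sampleproj", "1.2", Link("https://example.com/sampleproj-1.2.tar.gz"))

assert older < newer                                    # PEP 440 ordering
assert older == InstallationCandidate("sampleproj", "1.0", older.link)
best = max([older, newer])                              # the 1.2 candidate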
"""Legacy editable installation process, i.e. `setup.py develop`.
"""
import logging
from pip._internal.utils.logging import indent_log
from pip._internal.utils.setuptools_build import make_setuptools_develop_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Sequence
from pip._internal.build_env import BuildEnvironment
logger = logging.getLogger(__name__)
def install_editable(
install_options, # type: List[str]
global_options, # type: Sequence[str]
prefix, # type: Optional[str]
home, # type: Optional[str]
use_user_site, # type: bool
name, # type: str
setup_py_path, # type: str
isolated, # type: bool
build_env, # type: BuildEnvironment
unpacked_source_directory, # type: str
):
# type: (...) -> None
"""Install a package in editable mode. Most arguments are pass-through
to setuptools.
"""
logger.info('Running setup.py develop for %s', name)
args = make_setuptools_develop_args(
setup_py_path,
global_options=global_options,
install_options=install_options,
no_user_config=isolated,
prefix=prefix,
home=home,
use_user_site=use_user_site,
)
with indent_log():
with build_env:
call_subprocess(
args,
cwd=unpacked_source_directory,
)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py",
"repo_id": "Django-locallibrary",
"token_count": 591
} | 7 |
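A sketch of a call into install_editable. Inside pip all of these arguments are derived from an InstallRequirement; the project name, paths, and the NoOpBuildEnvironment used here are placeholders assumed for illustration.

# Hypothetical call site; paths are placeholders and NoOpBuildEnvironment is
# assumed to be importable from pip._internal.build_env in this pip version.
from pip._internal.build_env import NoOpBuildEnvironment
from pip._internal.operations.install.editable_legacy import install_editable

install_editable(
    install_options=[],
    global_options=[],
    prefix=None,
    home=None,
    use_user_site=False,
    name="sampleproj",
    setup_py_path="/tmp/sampleproj/setup.py",
    isolated=False,
    build_env=NoOpBuildEnvironment(),
    unpacked_source_directory="/tmp/sampleproj",
)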
from __future__ import absolute_import
import logging
from collections import OrderedDict
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import InstallationError
from pip._internal.models.wheel import Wheel
from pip._internal.utils import compatibility_tags
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List, Optional, Tuple
from pip._internal.req.req_install import InstallRequirement
logger = logging.getLogger(__name__)
class RequirementSet(object):
def __init__(self, check_supported_wheels=True):
# type: (bool) -> None
"""Create a RequirementSet.
"""
self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501
self.check_supported_wheels = check_supported_wheels
self.unnamed_requirements = [] # type: List[InstallRequirement]
def __str__(self):
# type: () -> str
requirements = sorted(
(req for req in self.requirements.values() if not req.comes_from),
key=lambda req: canonicalize_name(req.name),
)
return ' '.join(str(req.req) for req in requirements)
def __repr__(self):
# type: () -> str
requirements = sorted(
self.requirements.values(),
key=lambda req: canonicalize_name(req.name),
)
format_string = '<{classname} object; {count} requirement(s): {reqs}>'
return format_string.format(
classname=self.__class__.__name__,
count=len(requirements),
reqs=', '.join(str(req.req) for req in requirements),
)
def add_unnamed_requirement(self, install_req):
# type: (InstallRequirement) -> None
assert not install_req.name
self.unnamed_requirements.append(install_req)
def add_named_requirement(self, install_req):
# type: (InstallRequirement) -> None
assert install_req.name
project_name = canonicalize_name(install_req.name)
self.requirements[project_name] = install_req
def add_requirement(
self,
install_req, # type: InstallRequirement
parent_req_name=None, # type: Optional[str]
extras_requested=None # type: Optional[Iterable[str]]
):
# type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
# If the markers do not match, ignore this requirement.
if not install_req.match_markers(extras_requested):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
install_req.name, install_req.markers,
)
return [], None
# If the wheel is not supported, raise an error.
# Should check this after filtering out based on environment markers to
# allow specifying different wheels based on the environment/OS, in a
# single requirements file.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
tags = compatibility_tags.get_supported()
if (self.check_supported_wheels and not wheel.supported(tags)):
raise InstallationError(
"{} is not a supported wheel on this platform.".format(
wheel.filename)
)
# This next bit is really a sanity check.
assert not install_req.user_supplied or parent_req_name is None, (
"a user supplied req shouldn't have a parent"
)
# Unnamed requirements are scanned again and the requirement won't be
# added as a dependency until after scanning.
if not install_req.name:
self.add_unnamed_requirement(install_req)
return [install_req], None
try:
existing_req = self.get_requirement(
install_req.name) # type: Optional[InstallRequirement]
except KeyError:
existing_req = None
has_conflicting_requirement = (
parent_req_name is None and
existing_req and
not existing_req.constraint and
existing_req.extras == install_req.extras and
existing_req.req.specifier != install_req.req.specifier
)
if has_conflicting_requirement:
raise InstallationError(
"Double requirement given: {} (already in {}, name={!r})"
.format(install_req, existing_req, install_req.name)
)
# When no existing requirement exists, add the requirement as a
# dependency and it will be scanned again after.
if not existing_req:
self.add_named_requirement(install_req)
# We'd want to rescan this requirement later
return [install_req], install_req
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
if install_req.constraint or not existing_req.constraint:
return [], existing_req
does_not_satisfy_constraint = (
install_req.link and
not (
existing_req.link and
install_req.link.path == existing_req.link.path
)
)
if does_not_satisfy_constraint:
raise InstallationError(
"Could not satisfy constraints for '{}': "
"installation from path or url cannot be "
"constrained to a version".format(install_req.name)
)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
# If we're now installing a user supplied requirement,
# mark the existing object as such.
if install_req.user_supplied:
existing_req.user_supplied = True
existing_req.extras = tuple(sorted(
set(existing_req.extras) | set(install_req.extras)
))
logger.debug(
"Setting %s extras to: %s",
existing_req, existing_req.extras,
)
# Return the existing requirement for addition to the parent and
# scanning again.
return [existing_req], existing_req
def has_requirement(self, name):
# type: (str) -> bool
project_name = canonicalize_name(name)
return (
project_name in self.requirements and
not self.requirements[project_name].constraint
)
def get_requirement(self, name):
# type: (str) -> InstallRequirement
project_name = canonicalize_name(name)
if project_name in self.requirements:
return self.requirements[project_name]
raise KeyError("No project with the name {name!r}".format(**locals()))
@property
def all_requirements(self):
# type: () -> List[InstallRequirement]
return self.unnamed_requirements + list(self.requirements.values())
| Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_set.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_set.py",
"repo_id": "Django-locallibrary",
"token_count": 3252
} | 8 |
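A hedged sketch of RequirementSet in use. In pip the resolver drives add_requirement; install_req_from_line is assumed here to be available from pip._internal.req.constructors to build the InstallRequirement.

# Sketch only; install_req_from_line is assumed to exist in this pip version.
from pip._internal.req.constructors import install_req_from_line
from pip._internal.req.req_set import RequirementSet

req_set = RequirementSet(check_supported_wheels=True)
req = install_req_from_line("requests>=2.0")
req.user_supplied = True

to_scan, added = req_set.add_requirement(req)   # returns ([req], req)
assert req_set.has_requirement("requests")
print(req_set)                   # space-separated top-level requirements
print(req_set.all_requirements)  # unnamed followed by named requirements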
import sys
from pip._internal.cli.main import main
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/entrypoints.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/entrypoints.py",
"repo_id": "Django-locallibrary",
"token_count": 378
} | 9 |
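A short illustration of how the legacy wrapper above gets used: an old console-script shim simply forwards to _wrapper(), which prints the deprecation warning and then calls pip's real main(). The shim shown is a hypothetical reconstruction, not pip's actual packaging metadata.

# Hypothetical legacy shim script.
import sys

from pip._internal.utils.entrypoints import _wrapper

if __name__ == "__main__":
    sys.exit(_wrapper())    # args=None lets main() fall back to sys.argv[1:]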
"""Utilities related archives.
"""
from __future__ import absolute_import
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from pip._internal.exceptions import InstallationError
from pip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterable, List, Optional, Text, Union
from zipfile import ZipInfo
logger = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def current_umask():
# type: () -> int
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def split_leading_dir(path):
# type: (Union[str, Text]) -> List[Union[str, Text]]
path = path.lstrip('/').lstrip('\\')
if (
'/' in path and (
('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path
)
):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return [path, '']
def has_leading_dir(paths):
# type: (Iterable[Union[str, Text]]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def is_within_directory(directory, target):
# type: ((Union[str, Text]), (Union[str, Text])) -> bool
"""
Return true if the absolute path of target is within the directory
"""
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def set_extracted_file_to_default_mode_plus_executable(path):
# type: (Union[str, Text]) -> None
"""
    Make the file present at path have execute permission for
    user/group/world ("chmod +x"); this is a no-op on Windows per the Python docs.
"""
os.chmod(path, (0o777 & ~current_umask() | 0o111))
def zip_item_is_executable(info):
# type: (ZipInfo) -> bool
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not is_within_directory(location, fn):
message = (
'The zip file ({}) has a file ({}) trying to install '
'outside target directory ({})'
)
raise InstallationError(message.format(filename, fn, location))
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
if zip_item_is_executable(info):
set_extracted_file_to_default_mode_plus_executable(fn)
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([
member.name for member in tar.getmembers()
])
for member in tar.getmembers():
fn = member.name
if leading:
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
'The tar file ({}) has a file ({}) trying to install '
'outside target directory ({})'
)
raise InstallationError(
message.format(filename, path, location)
)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
assert fp is not None
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
# https://github.com/python/typeshed/issues/2673
tar.utime(member, path) # type: ignore
                # Does the member have any execute permission for user/group/world?
if member.mode & 0o111:
set_extracted_file_to_default_mode_plus_executable(path)
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type=None, # type: Optional[str]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (
content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)
):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (
content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS
)
):
untar_file(filename, location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of {}'.format(location)
)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/unpacking.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/unpacking.py",
"repo_id": "Django-locallibrary",
"token_count": 4386
} | 10 |
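A small usage sketch for the unpacking helpers above. The archive path is a placeholder; unpack_file dispatches to unzip_file or untar_file based on extension and content type, and is_within_directory is the guard both use against path traversal.

# Placeholder paths; any archive type supported above would do.
from pip._internal.utils.unpacking import is_within_directory, unpack_file

archive = "/tmp/downloads/sampleproj-1.0.tar.gz"
dest = "/tmp/build/sampleproj"

unpack_file(archive, dest)   # raises InstallationError on traversal attempts

# The traversal check is also usable on its own:
assert is_within_directory("/tmp/build", "/tmp/build/sampleproj/setup.py")
assert not is_within_directory("/tmp/build", "/tmp/other/evil.txt")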
"""Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Optional, Tuple,
)
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
# type: (str) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
if not req.use_pep517 and not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
logger.info(
"Using legacy 'setup.py install' for %s, "
"since package 'wheel' is not installed.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
def _should_cache(
req, # type: InstallRequirement
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if req.editable or not req.source_dir:
# never cache editable requirements
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
assert req.link
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
    wheel needs to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
assert req.link
if cache_available and _should_cache(req):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return _build_one_inside_env(
req, output_dir, build_options, global_options
)
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
assert req.name
if req.use_pep517:
assert req.metadata_directory
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirement that succeeded to build and
the list of InstallRequirement that failed to build.
"""
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements), # type: ignore
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(req, wheel_cache)
wheel_file = _build_one(
req, cache_dir, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]), # type: ignore
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]), # type: ignore
)
# Return a list of requirements that failed to build
return build_successes, build_failures
| Django-locallibrary/env/Lib/site-packages/pip/_internal/wheel_builder.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/wheel_builder.py",
"repo_id": "Django-locallibrary",
"token_count": 4106
} | 11 |
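A hedged sketch of driving the orchestrator above. The InstallRequirement objects and the cache directory are assumed to come from pip's resolver and options handling; WheelCache and FormatControl are assumed importable as shown for this pip version.

# Sketch only; `requirements` is an iterable of InstallRequirement objects.
from pip._internal.cache import WheelCache
from pip._internal.models.format_control import FormatControl
from pip._internal.wheel_builder import build, should_build_for_install_command

def build_needed_wheels(requirements, cache_dir):
    wheel_cache = WheelCache(cache_dir, FormatControl(set(), set()))
    to_build = [
        req for req in requirements
        if should_build_for_install_command(req, check_binary_allowed=lambda r: True)
    ]
    # Returns the requirements that built successfully and those that failed.
    return build(to_build, wheel_cache=wheel_cache,
                 build_options=[], global_options=[])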
"""
The httplib2 algorithms ported for use with requests.
"""
import logging
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
logger = logging.getLogger(__name__)
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(
self, cache=None, cache_etags=True, serializer=None, status_codes=None
):
self.cache = DictCache() if cache is None else cache
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
self.cacheable_status_codes = status_codes or (200, 203, 300, 301)
@classmethod
def _urlnorm(cls, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
@classmethod
def cache_url(cls, uri):
return cls._urlnorm(uri)
def parse_cache_control(self, headers):
known_directives = {
# https://tools.ietf.org/html/rfc7234#section-5.2
"max-age": (int, True),
"max-stale": (int, False),
"min-fresh": (int, True),
"no-cache": (None, False),
"no-store": (None, False),
"no-transform": (None, False),
"only-if-cached": (None, False),
"must-revalidate": (None, False),
"public": (None, False),
"private": (None, False),
"proxy-revalidate": (None, False),
"s-maxage": (int, True),
}
cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
retval = {}
for cc_directive in cc_headers.split(","):
if not cc_directive.strip():
continue
parts = cc_directive.split("=", 1)
directive = parts[0].strip()
try:
typ, required = known_directives[directive]
except KeyError:
logger.debug("Ignoring unknown cache-control directive: %s", directive)
continue
if not typ or not required:
retval[directive] = None
if typ:
try:
retval[directive] = typ(parts[1].strip())
except IndexError:
if required:
logger.debug(
"Missing value for cache-control " "directive: %s",
directive,
)
except ValueError:
logger.debug(
"Invalid value for cache-control directive " "%s, must be %s",
directive,
typ.__name__,
)
return retval
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if "max-age" in cc and cc["max-age"] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Request allows serving from the cache, let's see if we find something
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug("No cache entry available")
return False
# Check whether it can be deserialized
resp = self.serializer.loads(request, cache_data)
if not resp:
logger.warning("Cache entry deserialization failed, entry ignored")
return False
# If we have a cached 301, return it immediately. We don't
# need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if resp.status == 301:
msg = (
'Returning cached "301 Moved Permanently" response '
"(ignoring date and etag information)"
)
logger.debug(msg)
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or "date" not in headers:
if "etag" not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
self.cache.delete(cache_url)
logger.debug("Ignoring cached response: no date")
return False
now = time.time()
date = calendar.timegm(parsedate_tz(headers["date"]))
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if "max-age" in resp_cc:
freshness_lifetime = resp_cc["max-age"]
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in headers:
expires = parsedate_tz(headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if "max-age" in cc:
freshness_lifetime = cc["max-age"]
logger.debug(
"Freshness lifetime from request max-age: %i", freshness_lifetime
)
if "min-fresh" in cc:
min_fresh = cc["min-fresh"]
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in headers:
logger.debug('The cached response is "stale" with no etag, purging')
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
cache_url = self.cache_url(request.url)
resp = self.serializer.loads(request, self.cache.get(cache_url))
new_headers = {}
if resp:
headers = CaseInsensitiveDict(resp.headers)
if "etag" in headers:
new_headers["If-None-Match"] = headers["ETag"]
if "last-modified" in headers:
new_headers["If-Modified-Since"] = headers["Last-Modified"]
return new_headers
def cache_response(self, request, response, body=None, status_codes=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
cacheable_status_codes = status_codes or self.cacheable_status_codes
if response.status not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s", response.status, cacheable_status_codes
)
return
response_headers = CaseInsensitiveDict(response.headers)
        # If we've been given a body and the response has a valid
        # Content-Length header, check that the body matches the expected
        # size; if it doesn't, just skip trying to cache it.
if (
body is not None
and "content-length" in response_headers
and response_headers["content-length"].isdigit()
and int(response_headers["content-length"]) != len(body)
):
return
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
logger.debug('Updating cache with response from "%s"', cache_url)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and self.cache.get(cache_url):
logger.debug('Purging existing cache entry to honor "no-store"')
self.cache.delete(cache_url)
if no_store:
return
# https://tools.ietf.org/html/rfc7234#section-4.1:
# A Vary header field-value of "*" always fails to match.
# Storing such a response leads to a deserialization warning
# during cache lookup and is not allowed to ever be served,
# so storing it can be avoided.
if "*" in response_headers.get("vary", ""):
logger.debug('Response header has "Vary: *"')
return
# If we've been given an etag, then keep the response
if self.cache_etags and "etag" in response_headers:
logger.debug("Caching due to etag")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
)
        # Add any 301s to the cache. We do this before looking at
        # the Date headers.
elif response.status == 301:
logger.debug("Caching permanant redirect")
self.cache.set(cache_url, self.serializer.dumps(request, response))
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif "date" in response_headers:
# cache when there is a max-age > 0
if "max-age" in cc and cc["max-age"] > 0:
logger.debug("Caching b/c date exists and max-age > 0")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
)
# If the request can expire, it means we should cache it
# in the meantime.
elif "expires" in response_headers:
if response_headers["expires"]:
logger.debug("Caching b/c of expires header")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
)
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
dict(
(k, v)
for k, v in response.headers.items()
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
return cached_response
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py",
"repo_id": "Django-locallibrary",
"token_count": 6275
} | 12 |
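A brief sketch exercising the controller above in isolation: parse_cache_control normalises directives and cache_url builds the cache key. The header values and URL are invented for the example.

# Made-up header values to show directive parsing and URL normalisation.
from pip._vendor.cachecontrol.cache import DictCache
from pip._vendor.cachecontrol.controller import CacheController

controller = CacheController(cache=DictCache())

directives = controller.parse_cache_control(
    {"cache-control": "max-age=3600, no-transform, unknown-thing=5"}
)
# -> {'max-age': 3600, 'no-transform': None}; unknown directives are ignored.

key = CacheController.cache_url("https://PyPI.org/simple/requests/?x=1")
# -> 'https://pypi.org/simple/requests/?x=1' (scheme and host lowercased)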
"""
All of the Enums that are used throughout the chardet package.
:author: Dan Blanchard (dan.blanchard@gmail.com)
"""
class InputState(object):
"""
This enum represents the different states a universal detector can be in.
"""
PURE_ASCII = 0
ESC_ASCII = 1
HIGH_BYTE = 2
class LanguageFilter(object):
"""
This enum represents the different language filters we can apply to a
``UniversalDetector``.
"""
CHINESE_SIMPLIFIED = 0x01
CHINESE_TRADITIONAL = 0x02
JAPANESE = 0x04
KOREAN = 0x08
NON_CJK = 0x10
ALL = 0x1F
CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
CJK = CHINESE | JAPANESE | KOREAN
class ProbingState(object):
"""
This enum represents the different states a prober can be in.
"""
DETECTING = 0
FOUND_IT = 1
NOT_ME = 2
class MachineState(object):
"""
This enum represents the different states a state machine can be in.
"""
START = 0
ERROR = 1
ITS_ME = 2
class SequenceLikelihood(object):
"""
This enum represents the likelihood of a character following the previous one.
"""
NEGATIVE = 0
UNLIKELY = 1
LIKELY = 2
POSITIVE = 3
@classmethod
def get_num_categories(cls):
""":returns: The number of likelihood categories in the enum."""
return 4
class CharacterCategory(object):
"""
This enum represents the different categories language models for
``SingleByteCharsetProber`` put characters into.
Anything less than CONTROL is considered a letter.
"""
UNDEFINED = 255
LINE_BREAK = 254
SYMBOL = 253
DIGIT = 252
CONTROL = 251
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/enums.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/enums.py",
"repo_id": "Django-locallibrary",
"token_count": 626
} | 13 |
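A tiny illustration of the bit-flag enums above; the flags combine with bitwise OR exactly as the CHINESE and CJK aliases are built.

from pip._vendor.chardet.enums import LanguageFilter, SequenceLikelihood

# Restrict detection to Japanese plus non-CJK single-byte charsets.
lang_filter = LanguageFilter.JAPANESE | LanguageFilter.NON_CJK
assert (lang_filter & LanguageFilter.CHINESE) == 0

# Four likelihood buckets: NEGATIVE, UNLIKELY, LIKELY, POSITIVE.
assert SequenceLikelihood.get_num_categories() == 4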
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur; copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
            errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
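# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A registered archive callable is invoked as function(base_name, base_dir,
# **kwargs); make_archive() (defined just below, resolved when the helper is
# actually called) always passes dry_run and logger, and adds owner/group for
# non-zip formats.  The "listing" format name and _list_files helper are
# hypothetical.
def _demo_register_listing_format():
    def _list_files(base_name, base_dir, dry_run=0, logger=None, **kwargs):
        # Write a plain text listing of the tree instead of a real archive.
        listing = base_name + ".txt"
        if not dry_run:
            with open(listing, "w") as f:
                for dirpath, dirnames, filenames in os.walk(base_dir):
                    for name in filenames:
                        f.write(os.path.join(dirpath, name) + "\n")
        return listing

    register_archive_format("listing", _list_files,
                            description="plain text file listing")
    try:
        return make_archive("demo", "listing", base_dir=os.curdir)
    finally:
        unregister_archive_format("listing")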
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
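# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal make_archive() usage: pack a throw-away directory into a gzip'ed
# tarball.  The directory layout and file names are made up for the
# demonstration only.
def _demo_make_archive():
    import tempfile
    src = tempfile.mkdtemp()
    out = tempfile.mkdtemp()
    with open(os.path.join(src, "hello.txt"), "w") as f:
        f.write("hello\n")
    # Produces "<out>/demo.tar.gz" containing everything under src.
    return make_archive(os.path.join(out, "demo"), "gztar", root_dir=src)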
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registry."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
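# --- Illustrative sketch (editor's addition, not part of the original module) ---
# unpack_archive() picks the unpacker from the filename extension
# (".tar.gz" -> "gztar") unless an explicit format is given.  The paths below
# are hypothetical; see _demo_make_archive() above for producing such a file.
def _demo_unpack(archive_path="demo.tar.gz", target="unpacked"):
    unpack_archive(archive_path, extract_dir=target)                # by extension
    unpack_archive(archive_path, extract_dir=target, format="gztar")  # explicit
    return target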
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py",
"repo_id": "Django-locallibrary",
"token_count": 10707
} | 14 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
from .util import parse_requirement
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
# this is a method only to support alternative implementations
# via overriding
def parse_requirement(self, s):
return parse_requirement(s)
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
r = self.parse_requirement(s)
if not r:
raise ValueError('Not valid: %r' % s)
self.name = r.name
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if r.constraints:
# import pdb; pdb.set_trace()
for op, s in r.constraints:
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
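# --- Illustrative sketch (editor's addition, not part of the original module) ---
# _pep_440_key() maps a version string to a tuple that sorts the way PEP 440
# orders releases: dev releases before pre-releases, pre-releases before the
# final release, and post releases after it.  A quick self-check (expected to
# return True):
def _demo_pep440_ordering():
    ordered = ['1.0.dev1', '1.0a1', '1.0b2', '1.0rc1', '1.0', '1.0.post1']
    keys = [_pep_440_key(v) for v in ordered]
    return keys == sorted(keys)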
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),      # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
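# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Example inputs and the semantic forms the rules above are expected to yield:
#     _suggest_semantic_version('v1.2')       -> '1.2.0'
#     _suggest_semantic_version('2.0 beta 3') -> '2.0.0+beta.3'
def _demo_suggest_semantic():
    return [_suggest_semantic_version(s) for s in ('v1.2', '2.0 beta 3')]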
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
    @returns A rational version string, or None if one couldn't be determined.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
# version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
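# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Example inputs and the normalizations the rules above are expected to yield
# (the right-hand sides follow the inline comments in the code):
#     _suggest_normalized_version('1.0-alpha-2')   -> '1.0a2'
#     _suggest_normalized_version('1.0-dev-r371')  -> '1.0.dev371'
#     _suggest_normalized_version('not a version') -> None
def _demo_suggest_normalized():
    return [_suggest_normalized_version(s)
            for s in ('1.0-alpha-2', '1.0-dev-r371', 'not a version')]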
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
    # VersionScheme.suggest() calls suggester(s) with a single argument, so
    # the legacy identity suggester must take just one parameter.
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
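# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Typical use of the public entry points: validate a version, compare two
# versions, and evaluate a requirement against candidate releases.  The
# requirement uses the "name (constraints)" form shown in
# is_valid_constraint_list(); the name "demo" is arbitrary.
def _demo_scheme():
    scheme = get_scheme('default')
    assert scheme.is_valid_version('1.0.post2')
    assert NormalizedVersion('1.2') < NormalizedVersion('1.10')
    matcher = NormalizedMatcher('demo (>=1.4, <2.0)')
    return matcher.match('1.5.1') and not matcher.match('2.0')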
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/version.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/version.py",
"repo_id": "Django-locallibrary",
"token_count": 11127
} | 15 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque, OrderedDict
from sys import version_info
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
if version_info >= (3, 7):
attributeMap = dict
else:
attributeMap = OrderedDict
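# --- Illustrative sketch (editor's addition, not part of html5lib) ---
# Iterating an HTMLTokenizer (defined below; the name only resolves when the
# helper is called) yields token dicts whose "type" is an integer from
# tokenTypes; start and end tags carry a "name", other tokens carry "data".
# The sample markup is arbitrary.
def _demo_tokens(markup="<p class='note'>Hi & bye</p>"):
    names = {value: key for key, value in tokenTypes.items()}
    return [(names[token["type"]], token.get("name", token.get("data")))
            for token in HTMLTokenizer(markup)]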
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
        If the ";" is not present, a "numeric-entity-without-semicolon" parse error is queued.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
                # Try/except needed as UCS-2 Python builds' unichr only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["StartTag"]:
raw = token["data"]
data = attributeMap(raw)
if len(raw) > len(data):
# we had some duplicated attribute, fix so first wins
data.update(raw[::-1])
token["data"] = data
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py",
"repo_id": "Django-locallibrary",
"token_count": 38389
} | 16 |
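# A minimal usage sketch (illustrative, assuming the html5lib package or pip's
# vendored copy is importable as `html5lib`): the ParseError tokens queued by
# the attribute/comment/doctype states above surface through the high-level
# HTMLParser API as entries in `parser.errors`.
import html5lib

parser = html5lib.HTMLParser()
# The duplicate "class" attribute below exercises the "duplicate-attribute"
# parse error emitted when attributeNameState finishes reading the name.
parser.parse('<p class="a" class="b">hello</p>')
for error in parser.errors:
    print(error)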
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py",
"repo_id": "Django-locallibrary",
"token_count": 774
} | 17 |
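# A minimal sketch of feeding to_sax() with a custom SAX handler. It assumes
# html5lib is importable; EventRecorder is a hypothetical helper written here
# only to show which callbacks the walker drives.
import html5lib
from html5lib.treeadapters.sax import to_sax
from xml.sax.handler import ContentHandler

class EventRecorder(ContentHandler):
    def __init__(self):
        ContentHandler.__init__(self)
        self.events = []
    def startElementNS(self, name, qname, attrs):
        self.events.append(("start", name))
    def endElementNS(self, name, qname):
        self.events.append(("end", name))
    def characters(self, content):
        self.events.append(("text", content))

tree = html5lib.parse("<p>hello <em>world</em></p>")
walker = html5lib.getTreeWalker("etree")
handler = EventRecorder()
to_sax(walker(tree), handler)
print(handler.events)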
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
def encode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return "", 0
return encode(data), len(data)
def decode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return u"", 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return ("", 0)
labels = _unicode_dots_re.split(data)
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(alabel(label))
if size:
size += 1
size += len(label)
# Join with U+002E
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return (u"", 0)
# IDNA allows decoding to operate on Unicode strings, too.
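        # NOTE: 'unicode' below is the Python 2 builtin; on Python 3 this
        # isinstance check raises NameError because the name no longer exists.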
if isinstance(data, unicode):
labels = _unicode_dots_re.split(data)
else:
# Must be ASCII string
data = str(data)
unicode(data, "ascii")
labels = data.split(".")
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = u'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = u'.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result = u".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/codec.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/codec.py",
"repo_id": "Django-locallibrary",
"token_count": 1596
} | 18 |
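# A short usage sketch of the idna API that the codec above wraps (import path
# assumed; pip vendors it as pip._vendor.idna).
import idna

print(idna.encode("ドメイン.テスト"))           # b'xn--eckwd4c7c.xn--zckzah'
print(idna.decode("xn--eckwd4c7c.xn--zckzah"))  # 'ドメイン.テスト'
# alabel()/ulabel() work on a single label rather than a dotted domain name.
print(idna.alabel("ドメイン"))                  # b'xn--eckwd4c7c'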
# coding: utf-8
from collections import namedtuple
import datetime
import sys
import struct
PY2 = sys.version_info[0] == 2
if PY2:
int_types = (int, long)
_utc = None
else:
int_types = int
try:
_utc = datetime.timezone.utc
except AttributeError:
_utc = datetime.timezone(datetime.timedelta(0))
class ExtType(namedtuple("ExtType", "code data")):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
class Timestamp(object):
"""Timestamp represents the Timestamp extension type in msgpack.
When built with Cython, msgpack uses C methods to pack and unpack `Timestamp`. When using pure-Python
msgpack, :func:`to_bytes` and :func:`from_bytes` are used to pack and unpack `Timestamp`.
This class is immutable: Do not override seconds and nanoseconds.
"""
__slots__ = ["seconds", "nanoseconds"]
def __init__(self, seconds, nanoseconds=0):
"""Initialize a Timestamp object.
:param int seconds:
Number of seconds since the UNIX epoch (00:00:00 UTC Jan 1 1970, minus leap seconds).
May be negative.
:param int nanoseconds:
Number of nanoseconds to add to `seconds` to get fractional time.
Maximum is 999_999_999. Default is 0.
Note: Negative times (before the UNIX epoch) are represented as negative seconds + positive ns.
"""
if not isinstance(seconds, int_types):
raise TypeError("seconds must be an interger")
if not isinstance(nanoseconds, int_types):
raise TypeError("nanoseconds must be an integer")
if not (0 <= nanoseconds < 10 ** 9):
raise ValueError(
"nanoseconds must be a non-negative integer less than 999999999."
)
self.seconds = seconds
self.nanoseconds = nanoseconds
def __repr__(self):
"""String representation of Timestamp."""
return "Timestamp(seconds={0}, nanoseconds={1})".format(
self.seconds, self.nanoseconds
)
def __eq__(self, other):
"""Check for equality with another Timestamp object"""
if type(other) is self.__class__:
return (
self.seconds == other.seconds and self.nanoseconds == other.nanoseconds
)
return False
def __ne__(self, other):
"""not-equals method (see :func:`__eq__()`)"""
return not self.__eq__(other)
def __hash__(self):
return hash((self.seconds, self.nanoseconds))
@staticmethod
def from_bytes(b):
"""Unpack bytes into a `Timestamp` object.
Used for pure-Python msgpack unpacking.
:param b: Payload from msgpack ext message with code -1
:type b: bytes
:returns: Timestamp object unpacked from msgpack ext payload
:rtype: Timestamp
"""
if len(b) == 4:
seconds = struct.unpack("!L", b)[0]
nanoseconds = 0
elif len(b) == 8:
data64 = struct.unpack("!Q", b)[0]
seconds = data64 & 0x00000003FFFFFFFF
nanoseconds = data64 >> 34
elif len(b) == 12:
nanoseconds, seconds = struct.unpack("!Iq", b)
else:
raise ValueError(
"Timestamp type can only be created from 32, 64, or 96-bit byte objects"
)
return Timestamp(seconds, nanoseconds)
def to_bytes(self):
"""Pack this Timestamp object into bytes.
Used for pure-Python msgpack packing.
:returns data: Payload for EXT message with code -1 (timestamp type)
:rtype: bytes
"""
if (self.seconds >> 34) == 0: # seconds is non-negative and fits in 34 bits
data64 = self.nanoseconds << 34 | self.seconds
if data64 & 0xFFFFFFFF00000000 == 0:
# nanoseconds is zero and seconds < 2**32, so timestamp 32
data = struct.pack("!L", data64)
else:
# timestamp 64
data = struct.pack("!Q", data64)
else:
# timestamp 96
data = struct.pack("!Iq", self.nanoseconds, self.seconds)
return data
@staticmethod
def from_unix(unix_sec):
"""Create a Timestamp from posix timestamp in seconds.
        :param unix_sec: Posix timestamp in seconds.
        :type unix_sec: int or float.
"""
seconds = int(unix_sec // 1)
nanoseconds = int((unix_sec % 1) * 10 ** 9)
return Timestamp(seconds, nanoseconds)
def to_unix(self):
"""Get the timestamp as a floating-point value.
:returns: posix timestamp
:rtype: float
"""
return self.seconds + self.nanoseconds / 1e9
@staticmethod
def from_unix_nano(unix_ns):
"""Create a Timestamp from posix timestamp in nanoseconds.
:param int unix_ns: Posix timestamp in nanoseconds.
:rtype: Timestamp
"""
return Timestamp(*divmod(unix_ns, 10 ** 9))
def to_unix_nano(self):
"""Get the timestamp as a unixtime in nanoseconds.
:returns: posix timestamp in nanoseconds
:rtype: int
"""
return self.seconds * 10 ** 9 + self.nanoseconds
def to_datetime(self):
"""Get the timestamp as a UTC datetime.
Python 2 is not supported.
:rtype: datetime.
"""
return datetime.datetime.fromtimestamp(self.to_unix(), _utc)
@staticmethod
def from_datetime(dt):
"""Create a Timestamp from datetime with tzinfo.
Python 2 is not supported.
:rtype: Timestamp
"""
return Timestamp.from_unix(dt.timestamp())
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/ext.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/ext.py",
"repo_id": "Django-locallibrary",
"token_count": 2642
} | 19 |
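# A minimal sketch exercising the Timestamp helpers defined above; the import
# path is assumed (pip vendors the package as pip._vendor.msgpack).
from msgpack.ext import Timestamp

ts = Timestamp.from_unix(1609459200.5)        # 2021-01-01 00:00:00.5 UTC
print(ts.seconds, ts.nanoseconds)             # 1609459200 500000000

# Round-trip through the ext payload used by the pure-Python packer.
payload = ts.to_bytes()
print(Timestamp.from_bytes(payload) == ts)    # True

print(ts.to_unix())                           # 1609459200.5
print(ts.to_unix_nano())                      # 1609459200500000000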
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class InfinityType(object):
def __repr__(self):
# type: () -> str
return "Infinity"
def __hash__(self):
# type: () -> int
return hash(repr(self))
def __lt__(self, other):
# type: (object) -> bool
return False
def __le__(self, other):
# type: (object) -> bool
return False
def __eq__(self, other):
# type: (object) -> bool
return isinstance(other, self.__class__)
def __ne__(self, other):
# type: (object) -> bool
return not isinstance(other, self.__class__)
def __gt__(self, other):
# type: (object) -> bool
return True
def __ge__(self, other):
# type: (object) -> bool
return True
def __neg__(self):
# type: (object) -> NegativeInfinityType
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType(object):
def __repr__(self):
# type: () -> str
return "-Infinity"
def __hash__(self):
# type: () -> int
return hash(repr(self))
def __lt__(self, other):
# type: (object) -> bool
return True
def __le__(self, other):
# type: (object) -> bool
return True
def __eq__(self, other):
# type: (object) -> bool
return isinstance(other, self.__class__)
def __ne__(self, other):
# type: (object) -> bool
return not isinstance(other, self.__class__)
def __gt__(self, other):
# type: (object) -> bool
return False
def __ge__(self, other):
# type: (object) -> bool
return False
def __neg__(self):
# type: (object) -> InfinityType
return Infinity
NegativeInfinity = NegativeInfinityType()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_structures.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_structures.py",
"repo_id": "Django-locallibrary",
"token_count": 848
} | 20 |
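# A short sketch of how the Infinity/NegativeInfinity sentinels behave; they
# exist so version-sorting keys can pad comparisons (import path assumed).
from packaging._structures import Infinity, NegativeInfinity

print(Infinity > 10 ** 100, NegativeInfinity < -10 ** 100)           # True True
print(-Infinity is NegativeInfinity, -NegativeInfinity is Infinity)  # True True
# Sorting mixed keys: the sentinels always land at the extremes.
print(sorted([3, Infinity, -7, NegativeInfinity]))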
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division, print_function
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
try:
from time import monotonic
except ImportError:
from time import time as monotonic
__version__ = '1.5'
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class Infinite(object):
file = stderr
sma_window = 10 # Simple Moving Average window
check_tty = True
hide_cursor = True
def __init__(self, message='', **kwargs):
self.index = 0
self.start_ts = monotonic()
self.avg = 0
self._avg_update_ts = self.start_ts
self._ts = self.start_ts
self._xput = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
self._width = 0
self.message = message
if self.file and self.is_tty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def elapsed(self):
return int(monotonic() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update_avg(self, n, dt):
if n > 0:
xput_len = len(self._xput)
self._xput.append(dt / n)
now = monotonic()
# update when we're still filling _xput, then after every second
if (xput_len < self.sma_window or
now - self._avg_update_ts > 1):
self.avg = sum(self._xput) / len(self._xput)
self._avg_update_ts = now
def update(self):
pass
def start(self):
pass
def clearln(self):
if self.file and self.is_tty():
print('\r\x1b[K', end='', file=self.file)
def write(self, s):
if self.file and self.is_tty():
line = self.message + s.ljust(self._width)
print('\r' + line, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def writeln(self, line):
if self.file and self.is_tty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file and self.is_tty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
def is_tty(self):
return self.file.isatty() if self.check_tty else True
def next(self, n=1):
now = monotonic()
dt = now - self._ts
self.update_avg(n, dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
with self:
for x in it:
yield x
self.next()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
with self:
for x in it:
yield x
self.next()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/progress/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/progress/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 2247
} | 21 |
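# A minimal sketch of a concrete bar built on the Progress base class above
# (import path assumed; pip vendors the package as pip._vendor.progress, and
# ready-made bars live in progress.bar).
import time
from progress import Progress

class PercentProgress(Progress):
    def update(self):
        # write() keeps the message prefix and pads to the widest line so far.
        self.write(" %d%% (eta %ds)" % (self.percent, self.eta))

bar = PercentProgress("Working ", max=20)
for _ in bar.iter(range(20)):
    time.sleep(0.01)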
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
import warnings
from base64 import b64encode
from .compat import urlparse, str, basestring
from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(type(password)),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
elif _algorithm == 'SHA-256':
def sha256_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha256(x).hexdigest()
hash_utf8 = sha256_utf8
elif _algorithm == 'SHA-512':
def sha512_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha512(x).hexdigest()
hash_utf8 = sha512_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
# If response is not 4xx, do not auth
# See https://github.com/psf/requests/issues/3772
if not 400 <= r.status_code < 500:
self._thread_local.num_401_calls = 1
return r
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/auth.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/auth.py",
"repo_id": "Django-locallibrary",
"token_count": 4698
} | 22 |
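# A minimal usage sketch for the handlers above; the httpbin.org URLs are
# placeholders that echo back whether authentication succeeded.
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

basic = requests.get("https://httpbin.org/basic-auth/user/pass",
                     auth=HTTPBasicAuth("user", "pass"))
print(basic.status_code)   # 200 when the credentials match

digest = requests.get("https://httpbin.org/digest-auth/auth/user/pass",
                      auth=HTTPDigestAuth("user", "pass"))
print(digest.status_code)  # handle_401() above performs the challenge/response round trip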
import errno
from functools import partial
import select
import sys
try:
from time import monotonic
except ImportError:
from time import time as monotonic
__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
class NoWayToWaitForSocketError(Exception):
pass
# How should we wait on sockets?
#
# There are two types of APIs you can use for waiting on sockets: the fancy
# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
# select/poll. The stateful APIs are more efficient when you have lots of
# sockets to keep track of, because you can set them up once and then use them
# lots of times. But we only ever want to wait on a single socket at a time
# and don't want to keep track of state, so the stateless APIs are actually
# more efficient. So we want to use select() or poll().
#
# Now, how do we choose between select() and poll()? On traditional Unixes,
# select() has a strange calling convention that makes it slow, or fail
# altogether, for high-numbered file descriptors. The point of poll() is to fix
# that, so on Unixes, we prefer poll().
#
# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
# for it), but that's OK, because on Windows, select() doesn't have this
# strange calling convention; plain select() works fine.
#
# So: on Windows we use select(), and everywhere else we use poll(). We also
# fall back to select() in case poll() is somehow broken or missing.
if sys.version_info >= (3, 5):
# Modern Python, that retries syscalls by default
def _retry_on_intr(fn, timeout):
return fn(timeout)
else:
# Old and broken Pythons.
def _retry_on_intr(fn, timeout):
if timeout is None:
deadline = float("inf")
else:
deadline = monotonic() + timeout
while True:
try:
return fn(timeout)
# OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
except (OSError, select.error) as e:
# 'e.args[0]' incantation works for both OSError and select.error
if e.args[0] != errno.EINTR:
raise
else:
timeout = deadline - monotonic()
if timeout < 0:
timeout = 0
if timeout == float("inf"):
timeout = None
continue
def select_wait_for_socket(sock, read=False, write=False, timeout=None):
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
rcheck = []
wcheck = []
if read:
rcheck.append(sock)
if write:
wcheck.append(sock)
# When doing a non-blocking connect, most systems signal success by
# marking the socket writable. Windows, though, signals success by marked
# it as "exceptional". We paper over the difference by checking the write
# sockets for both conditions. (The stdlib selectors module does the same
# thing.)
fn = partial(select.select, rcheck, wcheck, wcheck)
rready, wready, xready = _retry_on_intr(fn, timeout)
return bool(rready or wready or xready)
def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
mask = 0
if read:
mask |= select.POLLIN
if write:
mask |= select.POLLOUT
poll_obj = select.poll()
poll_obj.register(sock, mask)
# For some reason, poll() takes timeout in milliseconds
def do_poll(t):
if t is not None:
t *= 1000
return poll_obj.poll(t)
return bool(_retry_on_intr(do_poll, timeout))
def null_wait_for_socket(*args, **kwargs):
raise NoWayToWaitForSocketError("no select-equivalent available")
def _have_working_poll():
# Apparently some systems have a select.poll that fails as soon as you try
# to use it, either due to strange configuration or broken monkeypatching
# from libraries like eventlet/greenlet.
try:
poll_obj = select.poll()
_retry_on_intr(poll_obj.poll, 0)
except (AttributeError, OSError):
return False
else:
return True
def wait_for_socket(*args, **kwargs):
# We delay choosing which implementation to use until the first time we're
# called. We could do it at import time, but then we might make the wrong
# decision if someone goes wild with monkeypatching select.poll after
# we're imported.
global wait_for_socket
if _have_working_poll():
wait_for_socket = poll_wait_for_socket
elif hasattr(select, "select"):
wait_for_socket = select_wait_for_socket
else: # Platform-specific: Appengine.
wait_for_socket = null_wait_for_socket
return wait_for_socket(*args, **kwargs)
def wait_for_read(sock, timeout=None):
""" Waits for reading to be available on a given socket.
Returns True if the socket is readable, or False if the timeout expired.
"""
return wait_for_socket(sock, read=True, timeout=timeout)
def wait_for_write(sock, timeout=None):
""" Waits for writing to be available on a given socket.
    Returns True if the socket is writable, or False if the timeout expired.
"""
return wait_for_socket(sock, write=True, timeout=timeout)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py",
"repo_id": "Django-locallibrary",
"token_count": 1994
} | 23 |
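# A small sketch of the helpers above on a local socket pair (import path
# assumed; requires a platform where socket.socketpair() is available).
import socket
from urllib3.util.wait import wait_for_read, wait_for_write

a, b = socket.socketpair()
try:
    print(wait_for_read(a, timeout=0.1))   # False: nothing to read yet
    b.sendall(b"ping")
    print(wait_for_read(a, timeout=0.1))   # True: data is waiting
    print(wait_for_write(a, timeout=0.1))  # True: the send buffer has room
finally:
    a.close()
    b.close()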
"""distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
import os
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats = []
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist(Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = {'posix': 'gztar',
'nt': 'zip'}
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
'wininst', 'zip', 'msi']
# And the real information.
format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'xztar': ('bdist_dumb', "xz'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
'msi': ('bdist_msi', "Microsoft Installer")
}
def initialize_options(self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
self.group = None
self.owner = None
def finalize_options(self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
if self.skip_build:
self.plat_name = get_platform()
else:
self.plat_name = self.get_finalized_command('build').plat_name
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create built distributions "
"on platform %s" % os.name)
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError("invalid format '%s'" % format)
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# passing the owner and group names for tar archiving
if cmd_name == 'bdist_dumb':
sub_cmd.owner = self.owner
sub_cmd.group = self.group
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/bdist.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/bdist.py",
"repo_id": "Django-locallibrary",
"token_count": 2703
} | 24 |
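A minimal sketch (not part of the vendored module above) of how the bdist command is normally driven: a throw-away setup.py whose distribution name and module list are placeholders. Running it as `python setup.py bdist --formats=gztar,zip` makes bdist resolve each format through format_command and invoke bdist_dumb for both.
# setup.py -- illustrative only; "example-pkg" and example.py are assumptions.
# Invoke as:  python setup.py bdist --formats=gztar,zip
from distutils.core import setup
setup(
    name="example-pkg",       # placeholder distribution name
    version="0.1",
    py_modules=["example"],   # assumes an example.py next to this file
)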
"""distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_headers.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_headers.py",
"repo_id": "Django-locallibrary",
"token_count": 577
} | 25 |
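For context, a hedged sketch of the setup-script side: install_headers copies whatever the `headers` keyword puts on the distribution, so a setup.py like the one below (the header path is an assumption) is all that is needed before running `python setup.py install_headers`.
# setup.py -- illustrative; the header path is an assumption.
from distutils.core import setup
setup(
    name="example-ext",              # placeholder distribution name
    version="0.1",
    headers=["include/example.h"],   # becomes self.distribution.headers above
)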
"""distutils.file_util
Utility functions for operating on single files.
"""
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents(src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except OSError as e:
raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
if os.path.exists(dst):
try:
os.unlink(dst)
except OSError as e:
raise DistutilsFileError(
"could not delete '%s': %s" % (dst, e.strerror))
try:
fdst = open(dst, 'wb')
except OSError as e:
raise DistutilsFileError(
"could not create '%s': %s" % (dst, e.strerror))
while True:
try:
buf = fsrc.read(buffer_size)
except OSError as e:
raise DistutilsFileError(
"could not read from '%s': %s" % (src, e.strerror))
if not buf:
break
try:
fdst.write(buf)
except OSError as e:
raise DistutilsFileError(
"could not write to '%s': %s" % (dst, e.strerror))
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
link=None, verbose=1, dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available. If hardlink fails, falls back to
_copy_file_contents().
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError(
"can't copy '%s': doesn't exist or not a regular file" % src)
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
if verbose >= 1:
log.debug("not copying %s (output up-to-date)", src)
return (dst, 0)
try:
action = _copy_action[link]
except KeyError:
raise ValueError("invalid value '%s' for 'link' argument" % link)
if verbose >= 1:
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
try:
os.link(src, dst)
return (dst, 1)
except OSError:
# If hard linking fails, fall back on copying file
# (some special filesystems don't support hard linking
# even under Unix, see issue #8876).
pass
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
return (dst, 1)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=1,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
if verbose >= 1:
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError("can't move '%s': not a regular file" % src)
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError(
"can't move '%s': destination '%s' already exists" %
(src, dst))
if not isdir(dirname(dst)):
raise DistutilsFileError(
"can't move '%s': destination '%s' not a valid path" %
(src, dst))
copy_it = False
try:
os.rename(src, dst)
except OSError as e:
(num, msg) = e.args
if num == errno.EXDEV:
copy_it = True
else:
raise DistutilsFileError(
"couldn't move '%s' to '%s': %s" % (src, dst, msg))
if copy_it:
copy_file(src, dst, verbose=verbose)
try:
os.unlink(src)
except OSError as e:
(num, msg) = e.args
try:
os.unlink(dst)
except OSError:
pass
raise DistutilsFileError(
"couldn't move '%s' to '%s' by copy/delete: "
"delete '%s' failed: %s"
% (src, dst, src, msg))
return dst
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
try:
for line in contents:
f.write(line + "\n")
finally:
f.close()
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/file_util.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/file_util.py",
"repo_id": "Django-locallibrary",
"token_count": 3688
} | 26 |
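A small self-contained sketch exercising write_file, copy_file and move_file from the module above; all paths live in a temporary directory and the file names are made up.
# Illustrative round trip through distutils.file_util; names are arbitrary.
import os
import tempfile
from distutils.file_util import write_file, copy_file, move_file
tmp = tempfile.mkdtemp()
src = os.path.join(tmp, "a.txt")
write_file(src, ["first line", "second line"])               # creates a.txt
dst, copied = copy_file(src, os.path.join(tmp, "b.txt"), verbose=0)
moved = move_file(dst, os.path.join(tmp, "c.txt"), verbose=0)
print(copied, os.path.basename(moved))                       # -> 1 c.txt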
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
| Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/_structures.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/_structures.py",
"repo_id": "Django-locallibrary",
"token_count": 560
} | 27 |
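The two sentinels above compare the same way against any operand, which is what packaging's version ordering relies on; a tiny demonstration follows. The vendored import path is an assumption -- the standalone `packaging` distribution exposes the same module.
# Illustrative only; import path assumes the vendored copy shown above.
from setuptools._vendor.packaging._structures import Infinity, NegativeInfinity
print(Infinity > 10**9)               # True: Infinity.__gt__ always returns True
print(NegativeInfinity < "anything")  # True: NegativeInfinity.__lt__ always returns True
print(-Infinity is NegativeInfinity)  # True: negation swaps the two singletons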
"""
Create a dist_info directory
As defined in the wheel specification
"""
import os
from distutils.core import Command
from distutils import log
class dist_info(Command):
description = 'create a .dist-info directory'
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
]
def initialize_options(self):
self.egg_base = None
def finalize_options(self):
pass
def run(self):
egg_info = self.get_finalized_command('egg_info')
egg_info.egg_base = self.egg_base
egg_info.finalize_options()
egg_info.run()
dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))
bdist_wheel = self.get_finalized_command('bdist_wheel')
bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
| Django-locallibrary/env/Lib/site-packages/setuptools/command/dist_info.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/dist_info.py",
"repo_id": "Django-locallibrary",
"token_count": 402
} | 28 |
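The command above is usually driven from the command line of a project that already has a setup.py and the wheel package installed (both assumptions here); a sketch of that invocation:
# Illustrative invocation; assumes ./setup.py exists and `wheel` is installed,
# since dist_info delegates the .egg-info -> .dist-info conversion to bdist_wheel.
import subprocess
import sys
subprocess.run(
    [sys.executable, "setup.py", "dist_info", "--egg-base", "build"],
    check=True,
)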
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
errors = 'strict' if six.PY2 else 'surrogateescape'
return s.encode('utf-8', errors)
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if isinstance(value, tuple):
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = _encode(value)
yield sep_boundary
yield _encode(title)
yield b"\n\n"
yield value
if value and value[-1:] == b'\r':
yield b'\n' # write an extra newline (lurve Macs)
@classmethod
def _build_multipart(cls, data):
"""
Build up the MIME payload for the POST data
"""
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\n--' + boundary.encode('ascii')
end_boundary = sep_boundary + b'--'
end_items = end_boundary, b"\n",
builder = functools.partial(
cls._build_part,
sep_boundary=sep_boundary,
)
part_groups = map(builder, data.items())
parts = itertools.chain.from_iterable(part_groups)
body_items = itertools.chain(parts, end_items)
content_type = 'multipart/form-data; boundary=%s' % boundary
return b''.join(body_items), content_type
def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if not six.PY2:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
body, ct = self._build_multipart(data)
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = ct
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
| Django-locallibrary/env/Lib/site-packages/setuptools/command/upload_docs.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/upload_docs.py",
"repo_id": "Django-locallibrary",
"token_count": 3402
} | 29 |
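_build_multipart above is a classmethod with no other state, so it can be exercised on its own; a hedged sketch with made-up field values:
# Illustrative use of upload_docs._build_multipart; all values are placeholders.
from setuptools.command.upload_docs import upload_docs
data = {
    ":action": "doc_upload",
    "name": "example-pkg",                        # placeholder project name
    "content": ("docs.zip", b"fake zip bytes"),   # (filename, raw bytes) pair
}
body, content_type = upload_docs._build_multipart(data)
print(content_type)        # multipart/form-data; boundary=...
print(len(body), "bytes")  # size of the encoded POST body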
"""
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
import warnings
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
from ._deprecation_warning import SetuptoolsDeprecationWarning
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests=False):
        # See if the distribution option has been set, otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
warnings.warn(
"2to3 support is deprecated. If the project still "
"requires Python 2 support, please migrate to "
"a single-codebase solution or employ an "
"independent conversion process.",
SetuptoolsDeprecationWarning)
log.info("Fixing " + " ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names:
return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
| Django-locallibrary/env/Lib/site-packages/setuptools/lib2to3_ex.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/lib2to3_ex.py",
"repo_id": "Django-locallibrary",
"token_count": 1069
} | 30 |
import platform
import ctypes
def windows_only(func):
if platform.system() != 'Windows':
return lambda *args, **kwargs: None
return func
@windows_only
def hide_file(path):
"""
Set the hidden attribute on a file or directory.
From http://stackoverflow.com/questions/19622133/
`path` must be text.
"""
__import__('ctypes.wintypes')
SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
SetFileAttributes.restype = ctypes.wintypes.BOOL
FILE_ATTRIBUTE_HIDDEN = 0x02
ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
if not ret:
raise ctypes.WinError()
| Django-locallibrary/env/Lib/site-packages/setuptools/windows_support.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/windows_support.py",
"repo_id": "Django-locallibrary",
"token_count": 273
} | 31 |
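Because the decorator above turns hide_file into a no-op off Windows, the sketch below is safe to run anywhere; the path is just a throw-away temporary directory.
# Illustrative call; on Windows this sets FILE_ATTRIBUTE_HIDDEN, elsewhere it
# does nothing because windows_only() replaced the function with a lambda.
import tempfile
from setuptools.windows_support import hide_file
path = tempfile.mkdtemp()
hide_file(path)
print("attempted to hide", path)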
//+------------------------------------------------------------------+
//| gradient-descent test.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/job/new?prefered=omegajoctan"
#property version "1.00"
//Hire me on your next big Machine learning project using the above link
#property description "Hire me on your next big Machine Learning project > click the above name"
#property script_show_inputs
//+------------------------------------------------------------------+
#include "GradientDescent.mqh";
CGradientDescent *grad;
//#include "C:\Users\Omega Joctan\AppData\Roaming\MetaQuotes\Terminal\892B47EBC091D6EF95E3961284A76097\MQL5\Experts\DataScience\LinearRegression\LinearRegressionLib.mqh";
//CMatrixRegression *lr;
input bool PrintDebugInfo = true; //set this to false to avoid filling up the logs
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
string filename = "Salary_Data.csv";
//---
double XMatrix[];
double YMatrix[];
grad = new CGradientDescent(1, 0.1,10000,PrintDebugInfo);
grad.ReadCsvCol(filename,1,XMatrix);
grad.ReadCsvCol(filename,2,YMatrix);
grad.MinMaxScaler(XMatrix);
grad.MinMaxScaler(YMatrix);
//
//ArrayPrint("Normalized X",XMatrix);
//ArrayPrint("Normalized Y",YMatrix);
grad.GradientDescentFunction(XMatrix,YMatrix,MSE);
filename = "titanic.csv";
ZeroMemory(XMatrix);
ZeroMemory(YMatrix);
grad.ReadCsvCol(filename,3,XMatrix);
grad.ReadCsvCol(filename,2,YMatrix);
grad.GradientDescentFunction(XMatrix,YMatrix,BCE);
delete (grad);
//delete(lr);
}
//+------------------------------------------------------------------+
void ArrayPrint(string str,double &Arr[])
{
Print(str);
::ArrayPrint(Arr);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+ | Gradient-Descent-MQL5/gradient-descent test.mq5/0 | {
"file_path": "Gradient-Descent-MQL5/gradient-descent test.mq5",
"repo_id": "Gradient-Descent-MQL5",
"token_count": 982
} | 32 |
//+------------------------------------------------------------------+
//| TestScript.mq5 |
//|                                      Copyright 2021, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2021, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include "LinearRegressionLib.mqh";
CSimpleLinearRegression lr;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
string file_name = "NASDAQ_DATA.csv";
double s_p[];
double y_nasdaq[];
double ma[];
double rsi[];
double y_nasdaq_predicted[];
lr.GetDataToArray(s_p,file_name,",",1);
lr.GetDataToArray(y_nasdaq,file_name,",",2);
lr.GetDataToArray(ma,file_name,",",3);
lr.GetDataToArray(rsi,file_name,",",4);
//---
lr.Init(s_p,y_nasdaq);
{
lr.LinearRegressionMain(y_nasdaq_predicted);
Print(" R_SQUARED = ",lr.r_squared());
Print("slope of a line ",lr.coefficient_of_X());
//Print("coefficient of Nasdaq vs s&p ", lr.coefficient_of_X()," correlation coefficient ",lr.corrcoef(),"\n corrcoef(s_p[],y_nasdaq[]) ",lr.corrcoef(s_p,y_nasdaq));
//Print("Correlation Coefficient NASDAQ vs S&P 500 = ",lr.corrcoef(s_p,y_nasdaq));
//Print("Correlation Coefficient NASDAQ vs 50SMA = ",lr.corrcoef(ma,y_nasdaq));
//Print("Correlation Coefficient NASDAQ Vs rsi = ",lr.corrcoef(rsi,y_nasdaq));
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+ | Linear-Regression-python-and-MQL5/TestScript.mq5/0 | {
"file_path": "Linear-Regression-python-and-MQL5/TestScript.mq5",
"repo_id": "Linear-Regression-python-and-MQL5",
"token_count": 1047
} | 33 |
//+------------------------------------------------------------------+
//| LDA.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include "base.mqh";
#include <MALE5\MqPlotLib\plots.mqh>
enum lda_criterion //selecting best components criteria selection
{
CRITERION_VARIANCE,
CRITERION_KAISER,
CRITERION_SCREE_PLOT
};
class CLDA
{
CPlots plt;
protected:
uint m_components;
lda_criterion m_criterion;
matrix projection_matrix;
ulong num_features;
double m_regparam;
vector mean;
uint calculate_variance(vector &eigen_values, double threshold=0.95);
uint calculate_kaiser(vector &eigen_values);
                           uint extract_components(vector &eigen_values, double threshold=0.95);
public:
CLDA(uint k=NULL, lda_criterion CRITERION_=CRITERION_SCREE_PLOT, double reg_param =1e-6);
~CLDA(void);
matrix fit_transform(const matrix &x, const vector &y);
matrix transform(const matrix &x);
vector transform(const vector &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CLDA::CLDA(uint k=NULL, lda_criterion CRITERION_=CRITERION_SCREE_PLOT, double reg_param=1e-6)
:m_components(k),
m_criterion(CRITERION_),
m_regparam(reg_param)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CLDA::~CLDA(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CLDA::fit_transform(const matrix &x, const vector &y)
{
vector classes = MatrixExtend::Unique(y);
ulong num_classes = classes.Size();
num_features = x.Cols();
this.mean = x.Mean(0);
matrix x_centered = BaseDimRed::subtract(x, this.mean);
matrix class_means(classes.Size(), x.Cols());
class_means.Fill(0.0);
for (ulong i=0; i<num_classes; i++)
{
matrix class_samples = {};
for (ulong j=0, count=0; j<x.Rows(); j++)
{
if (y[j] == classes[i])
{
count++;
class_samples.Resize(count, num_features);
class_samples.Row(x.Row(j), count-1);
}
}
class_means.Row(class_samples.Mean(0), i);
}
matrix SW, SB; //within and between scatter matrices
SW.Init(num_features, num_features);
SB.Init(num_features, num_features);
for (ulong i=0; i<num_classes; i++)
{
matrix class_samples = {};
for (ulong j=0, count=0; j<x.Rows(); j++)
{
if (y[j] == classes[i]) //Collect a matrix for samples belonging to a particular class
{
count++;
class_samples.Resize(count, num_features);
class_samples.Row(x.Row(j), count-1);
}
}
matrix diff = BaseDimRed::subtract(class_samples, class_means.Row(i)); //Each row subtracted to the mean
if (diff.Rows()==0 && diff.Cols()==0) //if the subtracted matrix is zero stop the program for possible bugs or errors
{
DebugBreak();
return x_centered;
}
SW += diff.Transpose().MatMul(diff); //Find within scatter matrix
vector mean_diff = class_means.Row(i) - x_centered.Mean(0);
SB += class_samples.Rows() * mean_diff.Outer(mean_diff); //compute between scatter matrix
}
//--- Regularization to avoid errors while calculating Eigen values and vectors
SW += this.m_regparam * MatrixExtend::eye((uint)num_features);
SB += this.m_regparam * MatrixExtend::eye((uint)num_features);
//---
matrix eigen_vectors(x.Cols(), x.Cols()); eigen_vectors.Fill(0.0);
vector eigen_values(x.Cols()); eigen_values.Fill(0.0);
matrix SBSW = SW.Inv().MatMul(SB);
BaseDimRed::ReplaceNaN(SBSW);
if (!SBSW.Eig(eigen_vectors, eigen_values))
{
Print("%s Failed to calculate eigen values and vectors Err=%d",__FUNCTION__,GetLastError());
DebugBreak();
matrix empty = {};
return empty;
}
//--- Sort eigenvectors by decreasing eigenvalues
vector args = MatrixExtend::ArgSort(eigen_values);
MatrixExtend::Reverse(args);
eigen_values = BaseDimRed::Sort(eigen_values, args);
eigen_vectors = BaseDimRed::Sort(eigen_vectors, args);
//---
if (this.m_components == NULL)
{
this.m_components = extract_components(eigen_values);
if (this.m_components==0)
{
printf("%s Failed to auto detect the best components\n You need to select the value of k yourself by looking at the scree plot",__FUNCTION__);
this.m_components = (uint)x.Cols();
}
}
else //plot the scree plot
extract_components(eigen_values);
this.projection_matrix = BaseDimRed::Slice(eigen_vectors, this.m_components);
return x_centered.MatMul(projection_matrix.Transpose());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CLDA::transform(const matrix &x)
{
if (this.projection_matrix.Rows() == 0)
{
printf("%s fit_transform method must be called befor transform",__FUNCTION__);
matrix empty = {};
return empty;
}
matrix x_centered = BaseDimRed::subtract(x, this.mean);
return x_centered.MatMul(this.projection_matrix.Transpose());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CLDA::transform(const vector &x)
{
matrix m = MatrixExtend::VectorToMatrix(x, this.num_features);
if (m.Rows()==0)
{
vector empty={};
return empty; //return nothing since there is a failure in converting vector to matrix
}
m = transform(m);
return MatrixExtend::MatrixToVector(m);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
uint CLDA::calculate_variance(vector &eigen_values, double threshold=0.95)
{
uint k=0;
vector eigen_pow = MathPow(eigen_values, 2);
vector cum_sum = eigen_pow.CumSum();
double sum = eigen_pow.Sum();
vector cumulative_variance = cum_sum / sum;
if (MQLInfoInteger(MQL_DEBUG))
Print("Cummulative variance: ",cumulative_variance);
vector v(cumulative_variance.Size()); v.Fill(0.0);
for (ulong i=0; i<v.Size(); i++)
v[i] = (cumulative_variance[i] >= threshold);
k = (uint)v.ArgMax() + 1;
return k;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
uint CLDA::calculate_kaiser(vector &eigen_values)
{
vector v(eigen_values.Size()); v.Fill(0.0);
for (ulong i=0; i<eigen_values.Size(); i++)
v[i] = (eigen_values[i] >= 1);
return uint(v.Sum());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
uint CLDA::extract_components(vector &eigen_values, double threshold=0.95)
{
uint k = 0;
switch(m_criterion)
{
case CRITERION_VARIANCE:
k = calculate_variance(eigen_values, threshold);
break;
case CRITERION_KAISER:
k = calculate_kaiser(eigen_values);
break;
case CRITERION_SCREE_PLOT:
{
vector v_cols(eigen_values.Size());
for (ulong i=0; i<v_cols.Size(); i++)
v_cols[i] = (int)i+1;
vector vars = eigen_values;
plt.Plot("Scree plot",v_cols,vars,"EigenValue","Principal Components","EigenValue", CURVE_POINTS_AND_LINES);
//---
         string warn = "\n<<<< WARNING >>>>\nThe scree plot cannot pick the number of components k on its own\nThe cumulative variance or the Kaiser criterion will be used to choose k instead\nThe k returned might differ from what you see on the scree plot";
         warn += "\nTo use the number of components you read off the scree plot\ncall the LDA model again with that value of k applied\n";
Print(warn);
//--- Kaiser
k = calculate_kaiser(eigen_values);
if (k==0) //kaiser wasn't suitable in this particular task
k = calculate_variance(eigen_values, threshold);
}
break;
}
return (k);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/Dimensionality Reduction/LDA.mqh/0 | {
"file_path": "MALE5/Dimensionality Reduction/LDA.mqh",
"repo_id": "MALE5",
"token_count": 4425
} | 34 |
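A minimal MQL5 script sketch showing one way to drive CLDA; the include path, the random data and the two-class labels are all assumptions made for illustration, not part of the library above.
//+------------------------------------------------------------------+
//| Illustrative CLDA usage sketch (random data, assumed include path)|
//+------------------------------------------------------------------+
#include <MALE5\Dimensionality Reduction\LDA.mqh>
void OnStart()
  {
   matrix x(100, 4);                              //random feature matrix
   vector y(100);                                 //two arbitrary class labels
   for(ulong i=0; i<x.Rows(); i++)
     {
      y[i] = (double)(i % 2);
      for(ulong j=0; j<x.Cols(); j++)
         x[i][j] = MathRand()/32767.0;
     }
   CLDA *lda = new CLDA(2, CRITERION_VARIANCE);   //keep 2 components
   matrix reduced = lda.fit_transform(x, y);      //project the training data
   vector sample = x.Row(0);
   vector projected = lda.transform(sample);      //project a single sample
   Print("reduced: ", reduced.Rows(), " x ", reduced.Cols());
   delete lda;
  }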
//+------------------------------------------------------------------+
//| plots.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include <Graphics\Graphic.mqh>
#include <MALE5\MatrixExtend.mqh>
class CPlots
{
protected:
CGraphic *graph;
long m_chart_id;
int m_subwin;
int m_x1, m_x2;
int m_y1, m_y2;
string m_font_family;
bool m_chart_show;
string m_plot_names[];
ENUM_CURVE_TYPE m_curve_type;
bool GraphCreate(string plot_name);
vector m_x, m_y;
string x_label, y_label;
public:
CPlots(long chart_id=0, int sub_win=0 ,int x1=30, int y1=40, int x2=550, int y2=310, string font_family="Consolas", bool chart_show=true);
~CPlots(void);
bool Plot(string plot_name, vector& x, vector& y, string x_axis_label, string y_axis_label, string label, ENUM_CURVE_TYPE curve_type=CURVE_POINTS_AND_LINES,color clr = clrDodgerBlue, bool points_fill = true);
bool AddPlot(vector &v,string label="plt",color clr=clrOrange);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPlots::CPlots(long chart_id=0, int sub_win=0 ,int x1=30, int y1=40, int x2=550, int y2=310, string font_family="Consolas", bool chart_show=true):
m_chart_id(chart_id),
m_subwin(sub_win),
m_x1(x1),
m_y1(y1),
m_x2(x2),
m_y2(y2),
m_font_family(font_family),
m_chart_show(chart_show)
{
graph = new CGraphic();
ChartRedraw(m_chart_id);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPlots::~CPlots(void)
{
for (int i=0; i<ArraySize(m_plot_names); i++)
ObjectDelete(m_chart_id,m_plot_names[i]);
delete(graph);
ChartRedraw();
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CPlots::GraphCreate(string plot_name)
{
ChartRedraw(m_chart_id);
ArrayResize(m_plot_names,ArraySize(m_plot_names)+1);
m_plot_names[ArraySize(m_plot_names)-1] = plot_name;
ChartSetInteger(m_chart_id, CHART_SHOW, m_chart_show);
if(!graph.Create(m_chart_id, plot_name, m_subwin, m_x1, m_y1, m_x2, m_y2))
{
printf("Failed to Create graphical object on the Main chart Err = %d", GetLastError());
return(false);
}
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CPlots::Plot(
string plot_name,
vector& x,
vector& y,
string x_axis_label,
string y_axis_label,
string label,
ENUM_CURVE_TYPE curve_type=CURVE_POINTS_AND_LINES,
color clr = clrDodgerBlue,
bool points_fill = true
)
{
if (!this.GraphCreate(plot_name))
return (false);
//---
this.m_x = x;
this.m_y = y;
   this.x_label = x_axis_label;
this.y_label = y_axis_label;
double x_arr[], y_arr[];
MatrixExtend::VectorToArray(x, x_arr);
MatrixExtend::VectorToArray(y, y_arr);
m_curve_type = curve_type;
graph.CurveAdd(x_arr, y_arr, ColorToARGB(clr), m_curve_type, label);
graph.XAxis().Name(x_axis_label);
graph.XAxis().NameSize(13);
graph.YAxis().Name(y_axis_label);
graph.YAxis().NameSize(13);
graph.FontSet(m_font_family, 13);
graph.CurvePlotAll();
graph.Update();
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CPlots::AddPlot(vector &v, string label="plt",color clr=clrOrange)
{
double x_arr[], y_arr[];
MatrixExtend::VectorToArray(this.m_x, x_arr);
MatrixExtend::VectorToArray(v, y_arr);
if (!graph.CurveAdd(x_arr, y_arr, ColorToARGB(clr), m_curve_type, label))
{
printf("%s failed to add a plot to the existing plot Err =%d",__FUNCTION__,GetLastError());
return false;
}
graph.XAxis().Name(this.x_label);
graph.XAxis().NameSize(13);
graph.YAxis().Name(this.y_label);
graph.YAxis().NameSize(13);
graph.FontSet(m_font_family, 13);
graph.CurvePlotAll();
graph.Update();
return(true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/MqPlotLib/plots.mqh/0 | {
"file_path": "MALE5/MqPlotLib/plots.mqh",
"repo_id": "MALE5",
"token_count": 2511
} | 35 |
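A short script sketch of the CPlots interface above: one curve drawn with Plot() and a second added with AddPlot(); the include path and the sine data are illustrative assumptions.
//+------------------------------------------------------------------+
//| Illustrative CPlots usage sketch (assumed include path)           |
//+------------------------------------------------------------------+
#include <MALE5\MqPlotLib\plots.mqh>
void OnStart()
  {
   vector x(50), y(50);
   for(ulong i=0; i<x.Size(); i++)
     {
      x[i] = (double)i;
      y[i] = MathSin(i*0.2);               //first curve
     }
   CPlots plt;                              //defaults: main chart, subwindow 0
   plt.Plot("demo", x, y, "index", "value", "sine", CURVE_POINTS_AND_LINES);
   vector y2 = y*0.5;                       //second curve on the same axes
   plt.AddPlot(y2, "half sine", clrOrange);
  }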
//+------------------------------------------------------------------+
//| cross_validation.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include <MALE5\Tensors.mqh>
#include <MALE5\MatrixExtend.mqh>
class CCrossValidation
{
CTensors *tensors[]; //Keep track of all the tensors in memory
public:
CCrossValidation();
~CCrossValidation(void);
CTensors *KFoldCV(matrix &data, uint n_spilts=5);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CCrossValidation::CCrossValidation()
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CCrossValidation::~CCrossValidation(void)
{
for (uint i=0; i<tensors.Size(); i++)
if (CheckPointer(tensors[i]) != POINTER_INVALID)
delete (tensors[i]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTensors *CCrossValidation::KFoldCV(matrix &data, uint n_spilts=5)
{
ArrayResize(tensors, tensors.Size()+1);
tensors[tensors.Size()-1] = new CTensors(n_spilts);
int size = (int)MathFloor(data.Rows() / (double)n_spilts);
matrix split_data = {};
for (uint k=0, start = 0; k<n_spilts; k++)
{
split_data = MatrixExtend::Get(data, start, (start+size)-1);
tensors[tensors.Size()-1].Add(split_data, k);
start += size;
}
return tensors[tensors.Size()-1];
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/cross_validation.mqh/0 | {
"file_path": "MALE5/cross_validation.mqh",
"repo_id": "MALE5",
"token_count": 1141
} | 36 |
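A sketch of splitting a random dataset into five folds with KFoldCV; the include path and the data are assumptions, and fold access is left out because the CTensors interface lives in Tensors.mqh, which is not shown here.
//+------------------------------------------------------------------+
//| Illustrative KFoldCV usage sketch (assumed include path)          |
//+------------------------------------------------------------------+
#include <MALE5\cross_validation.mqh>
void OnStart()
  {
   matrix data(100, 3);                     //random placeholder samples
   for(ulong i=0; i<data.Rows(); i++)
      for(ulong j=0; j<data.Cols(); j++)
         data[i][j] = MathRand()/32767.0;
   CCrossValidation cv;
   CTensors *folds = cv.KFoldCV(data, 5);   //five 20x3 fold matrices
   Print("split into 5 folds of 20 rows each");
   //fold matrices are read back through the CTensors interface (Tensors.mqh)
  }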
{
"configurations": [
{
"name": "Win32",
"includePath": [
"${workspaceFolder}/**"
],
"defines": [
"_DEBUG",
"UNICODE",
"_UNICODE"
],
"cStandard": "c17",
"cppStandard": "c++17",
"intelliSenseMode": "windows-msvc-x64"
}
],
"version": 4
} | MatrixRegressionMQL5/Gauss-Jordan c++/.vscode/c_cpp_properties.json/0 | {
"file_path": "MatrixRegressionMQL5/Gauss-Jordan c++/.vscode/c_cpp_properties.json",
"repo_id": "MatrixRegressionMQL5",
"token_count": 275
} | 37 |
//+------------------------------------------------------------------+
//| KMeans.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
#include <Graphics\Graphic.mqh>
CGraphic graph;
//+------------------------------------------------------------------+
enum errors
{
KM_ERR001, //clusters not matching in size Error
KM_ERR002
};
//+------------------------------------------------------------------+
class CKMeans
{
private:
ulong n; //number of samples
uint m_clusters;
ulong m_cols;
matrix InitialCentroids;
vector<double> cluster_assign;
protected:
matrix Matrix;
bool ErrMsg(errors err);
void MatrixToVector(const matrix &mat, vector &vec);
void VectorCopy(vector &src_vec, vector &dst_vec, uint from, uint size);
void vectortoArray(const vector &vec,double &arr[]);
void arraytoVector(const double &arr[],vector &vec);
bool ScatterCurvePlots(
string obj_name,
double &x[],
double &y[],
double &curveArr[],
string legend,
string x_axis_label = "x-axis",
string y_axis_label = "y-axis",
color clr = clrDodgerBlue,
bool points_fill = true
);
public:
CKMeans(const matrix &_Matrix, int clusters=3);
~CKMeans(void);
void KMeansClustering(matrix &clustered_matrix, matrix ¢roids,int iterations = 1,bool rand_cluster =false);
void ElbowMethod(const int initial_k=1, int total_k=10, bool showPlot = true);
void FilterZero(vector &vec);
void matrixtoArray(matrix &mat,double &Array[]);
};
//+------------------------------------------------------------------+
CKMeans::CKMeans(const matrix &_Matrix, int clusters=3)
{
m_clusters = clusters;
Matrix.Copy(_Matrix);
m_cols = Matrix.Cols();
n = Matrix.Rows(); //number of elements | Matrix Rows
}
//+------------------------------------------------------------------+
CKMeans::~CKMeans(void)
{
ZeroMemory(m_clusters);
ZeroMemory(InitialCentroids);
ZeroMemory(cluster_assign);
}
//+------------------------------------------------------------------+
void CKMeans::KMeansClustering(matrix &clustered_matrix,matrix ¢roids,int iterations = 1,bool rand_cluster =false)
{
InitialCentroids.Resize(m_clusters,m_cols);
cluster_assign.Resize(n);
clustered_matrix.Resize(m_clusters, m_clusters*n);
clustered_matrix.Fill(NULL);
vector cluster_comb_v = {};
matrix cluster_comb_m = {};
vector rand_v = {};
ulong rand_ = 0;
for (ulong i=0; i<m_clusters; i++)
{
rand_ = rand_cluster ? (ulong)MathFloor(i*(n/m_clusters)) : i;
rand_v = Matrix.Row(rand_);
InitialCentroids.Row(rand_v, i);
}
//---
vector v_row;
matrix rect_distance = {}; //matrix to store rectilinear distances
rect_distance.Reshape(n, m_clusters);
vector v_matrix = {}, v_centroid = {};
double output = 0;
//---
for (int iter=0; iter<iterations; iter++)
{
for (ulong i=0; i<rect_distance.Rows(); i++)
for (ulong j=0; j<rect_distance.Cols(); j++)
{
v_matrix = Matrix.Row(i); v_centroid = InitialCentroids.Row(j);
ZeroMemory(output);
for (ulong k=0; k<v_matrix.Size(); k++)
output += MathAbs(v_matrix[k] - v_centroid[k]); //Rectilinear distance
rect_distance[i][j] = output;
}
//--- Assigning the Clusters
matrix cluster_cent = {}; //cluster centroids
ulong cluster = 0;
for (ulong i=0; i<rect_distance.Rows(); i++)
{
v_row = rect_distance.Row(i);
cluster = v_row.ArgMin();
cluster_assign[i] = (double)cluster;
}
vector temp_cluster_assign = cluster_assign;
//--- Combining the clusters
for (ulong i=0, index =0; i<cluster_assign.Size(); i++)
{
ZeroMemory(cluster_cent);
bool classified = false;
for (ulong j=0, count = 0; j<temp_cluster_assign.Size(); j++)
{
if (cluster_assign[i] == temp_cluster_assign[j])
{
classified = true;
count++;
cluster_comb_m.Resize(count, m_cols);
cluster_comb_m.Row(Matrix.Row(j) , count-1);
cluster_cent.Resize(count, m_cols);
// New centroids
cluster_cent.Row(Matrix.Row(j),count-1);
temp_cluster_assign[j] = -100; //modify the cluster item so it can no longer be found
}
else continue;
}
//---
if (classified == true)
{
              // solving for new cluster centroids and updating the old ones
MatrixToVector(cluster_comb_m, cluster_comb_v);
if (iter == iterations-1) clustered_matrix.Row(cluster_comb_v, index);
index++;
//---
vector x_y_z = {0,0};
ZeroMemory(rand_v);
for (ulong k=0; k<cluster_cent.Cols(); k++)
{
x_y_z.Resize(cluster_cent.Cols());
rand_v = cluster_cent.Col(k);
x_y_z[k] = rand_v.Mean();
}
InitialCentroids.Row(x_y_z, i);
}
if (index >= m_clusters) break;
}
} //end of iterations
ZeroMemory(centroids);
centroids.Copy(InitialCentroids);
}
//+------------------------------------------------------------------+
bool CKMeans::ErrMsg(errors err)
{
switch(err)
{
case KM_ERR001:
printf("%s Clusters not matching in Size ",EnumToString(KM_ERR001));
break;
default:
break;
}
return(true);
}
//+------------------------------------------------------------------+
void CKMeans::MatrixToVector(const matrix &mat, vector &vec)
{
vec.Resize(mat.Rows()*mat.Cols());
for (ulong i=0, index = 0; i<mat.Rows(); i++)
for (ulong j=0; j<mat.Cols(); j++, index++)
vec[index] = mat[i][j];
}
//+------------------------------------------------------------------+
void CKMeans::ElbowMethod(const int initial_k=1, int total_k=10, bool showPlot = true)
{
matrix clustered_mat, _centroids = {};
if (total_k > (int)n) total_k = (int)n; //k should always be less than n
vector centroid_v={}, x_y_z={};
vector short_v = {}; //vector for each point
vector minus_v = {}; //vector to store the minus operation output
double wcss = 0;
double WCSS[]; ArrayResize(WCSS,total_k);
double kArray[]; ArrayResize(kArray,total_k);
for (int k=initial_k, count_k=0; k<ArraySize(WCSS)+initial_k; k++, count_k++)
{
wcss = 0;
m_clusters = k;
KMeansClustering(clustered_mat,_centroids);
for (ulong i=0; i<_centroids.Rows(); i++)
{
centroid_v = _centroids.Row(i);
x_y_z = clustered_mat.Row(i);
FilterZero(x_y_z);
for (ulong j=0; j<x_y_z.Size()/m_cols; j++)
{
VectorCopy(x_y_z,short_v,uint(j*m_cols),(uint)m_cols);
//--- WCSS ( within cluster sum of squared residuals )
minus_v = (short_v - centroid_v);
minus_v = MathPow(minus_v,2);
wcss += minus_v.Sum();
}
}
WCSS[count_k] = wcss;
kArray[count_k] = k;
}
Print("WCSS"); ArrayPrint(WCSS);
Print("kArray"); ArrayPrint(kArray);
//--- Plotting the Elbow on the graph
if (showPlot)
{
ObjectDelete(0,"elbow");
ScatterCurvePlots("elbow",kArray,WCSS,WCSS,"Elbow line","k","WCSS");
}
}
//+------------------------------------------------------------------+
void CKMeans::FilterZero(vector &vec)
{
vector new_vec = {};
for (ulong i=0,count =0; i<vec.Size(); i++)
{
if (vec[i] != NULL)
{
count++; new_vec.Resize(count);
new_vec[count-1] = vec[i];
}
else continue;
}
vec.Copy(new_vec);
}
//+------------------------------------------------------------------+
void CKMeans::VectorCopy(vector &src_vec, vector &dst_vec, uint from, uint size)
{
dst_vec.Resize(size-from);
double src[]; double dst[];
vectortoArray(src_vec,src); vectortoArray(dst_vec,dst);
ArrayCopy(dst,src,0,(int)from,(int)size);
arraytoVector(src,src_vec); arraytoVector(dst,dst_vec);
}
//+------------------------------------------------------------------+
void CKMeans::vectortoArray(const vector &vec,double &arr[])
{
ArrayResize(arr,(int)vec.Size());
for (int i=0; i<ArraySize(arr); i++) arr[(int)i] = vec[(ulong)i];
}
//+------------------------------------------------------------------+
void CKMeans::arraytoVector(const double &arr[],vector &vec)
{
vec.Resize(ArraySize(arr));
for (ulong i=0; i<vec.Size(); i++) vec[i] = arr[i];
}
//+------------------------------------------------------------------+
bool CKMeans::ScatterCurvePlots(
string obj_name,
double &x[],
double &y[],
double &curveArr[],
string legend,
string x_axis_label = "x-axis",
string y_axis_label = "y-axis",
color clr = clrDodgerBlue,
bool points_fill = true
)
{
if (!graph.Create(0,obj_name,0,30,70,800,640))
{
printf("Failed to Create graphical object on the Main chart Err = %d",GetLastError());
return(false);
}
ChartSetInteger(0,CHART_SHOW,true);
//---
//graph.CurveAdd(x,y,clrBlack,CURVE_POINTS,y_axis_label);
graph.CurveAdd(x,curveArr,clr,CURVE_POINTS_AND_LINES,legend);
graph.XAxis().Name(x_axis_label);
graph.XAxis().NameSize(13);
graph.YAxis().Name(y_axis_label);
graph.YAxis().NameSize(13);
graph.FontSet("Lucida Console",13);
graph.CurvePlotAll();
graph.Update();
return(true);
}
//+------------------------------------------------------------------+
void CKMeans::matrixtoArray(matrix &mat,double &Array[])
{
ArrayFree(Array);
ArrayResize(Array,int(mat.Rows()*mat.Cols()));
int index = 0;
for (ulong i=0; i<mat.Rows(); i++)
for (ulong j=0; j<mat.Cols(); j++, index++)
{
Array[index] = mat[i][j];
}
}
//+------------------------------------------------------------------+
| Data-Mining-MQL5/KMeans.mqh/0 | {
"file_path": "Data-Mining-MQL5/KMeans.mqh",
"repo_id": "Data-Mining-MQL5",
"token_count": 7193
} | 0 |
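A script sketch of the clustering and elbow workflow above on random two-dimensional points; the relative include and the data are illustrative only.
//+------------------------------------------------------------------+
//| Illustrative CKMeans usage sketch (random 2-D points)             |
//+------------------------------------------------------------------+
#include "KMeans.mqh"                        //assumes the script sits next to KMeans.mqh
void OnStart()
  {
   matrix points(60, 2);                     //random placeholder samples
   for(ulong i=0; i<points.Rows(); i++)
      for(ulong j=0; j<points.Cols(); j++)
         points[i][j] = MathRand()/32767.0;
   CKMeans *kmeans = new CKMeans(points, 3);                 //3 clusters
   matrix clustered, centroids;
   kmeans.KMeansClustering(clustered, centroids, 10, true);  //10 iterations
   Print("centroids: ", centroids.Rows(), " x ", centroids.Cols());
   kmeans.ElbowMethod(1, 8, true);           //WCSS curve for k = 1..8
   delete kmeans;
  }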
//+------------------------------------------------------------------+
//| TestScript.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//#include "DecisionTreeLib.mqh";
#include "decisiontree_2.mqh";
CDecisionTree *tree;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
tree = new CDecisionTree();
string file_name = "PlayTennis vs wheather.csv";
tree.Init(6,"2,3,4,5",file_name);
tree.BuildTree();
delete (tree);
}
//+------------------------------------------------------------------+
| DecisionTree-Classification-tree-MQL5/TestScript.mq5/0 | {
"file_path": "DecisionTree-Classification-tree-MQL5/TestScript.mq5",
"repo_id": "DecisionTree-Classification-tree-MQL5",
"token_count": 494
} | 1 |
# Generated by Django 3.2.3 on 2021-08-28 05:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bookinstance',
name='imprint',
field=models.CharField(help_text='Imprint Date', max_length=200),
),
]
| Django-locallibrary/LocalLibrary/catalog/migrations/0002_alter_bookinstance_imprint.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/migrations/0002_alter_bookinstance_imprint.py",
"repo_id": "Django-locallibrary",
"token_count": 178
} | 2 |
select.admin-autocomplete {
width: 20em;
}
.select2-container--admin-autocomplete.select2-container {
min-height: 30px;
}
.select2-container--admin-autocomplete .select2-selection--single,
.select2-container--admin-autocomplete .select2-selection--multiple {
min-height: 30px;
padding: 0;
}
.select2-container--admin-autocomplete.select2-container--focus .select2-selection,
.select2-container--admin-autocomplete.select2-container--open .select2-selection {
border-color: var(--body-quiet-color);
min-height: 30px;
}
.select2-container--admin-autocomplete.select2-container--focus .select2-selection.select2-selection--single,
.select2-container--admin-autocomplete.select2-container--open .select2-selection.select2-selection--single {
padding: 0;
}
.select2-container--admin-autocomplete.select2-container--focus .select2-selection.select2-selection--multiple,
.select2-container--admin-autocomplete.select2-container--open .select2-selection.select2-selection--multiple {
padding: 0;
}
.select2-container--admin-autocomplete .select2-selection--single {
background-color: var(--body-bg);
border: 1px solid var(--border-color);
border-radius: 4px;
}
.select2-container--admin-autocomplete .select2-selection--single .select2-selection__rendered {
color: var(--body-fg);
line-height: 30px;
}
.select2-container--admin-autocomplete .select2-selection--single .select2-selection__clear {
cursor: pointer;
float: right;
font-weight: bold;
}
.select2-container--admin-autocomplete .select2-selection--single .select2-selection__placeholder {
color: var(--body-quiet-color);
}
.select2-container--admin-autocomplete .select2-selection--single .select2-selection__arrow {
height: 26px;
position: absolute;
top: 1px;
right: 1px;
width: 20px;
}
.select2-container--admin-autocomplete .select2-selection--single .select2-selection__arrow b {
border-color: #888 transparent transparent transparent;
border-style: solid;
border-width: 5px 4px 0 4px;
height: 0;
left: 50%;
margin-left: -4px;
margin-top: -2px;
position: absolute;
top: 50%;
width: 0;
}
.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--single .select2-selection__clear {
float: left;
}
.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--single .select2-selection__arrow {
left: 1px;
right: auto;
}
.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--single {
background-color: var(--darkened-bg);
cursor: default;
}
.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--single .select2-selection__clear {
display: none;
}
.select2-container--admin-autocomplete.select2-container--open .select2-selection--single .select2-selection__arrow b {
border-color: transparent transparent #888 transparent;
border-width: 0 4px 5px 4px;
}
.select2-container--admin-autocomplete .select2-selection--multiple {
background-color: var(--body-bg);
border: 1px solid var(--border-color);
border-radius: 4px;
cursor: text;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__rendered {
box-sizing: border-box;
list-style: none;
margin: 0;
padding: 0 10px 5px 5px;
width: 100%;
display: flex;
flex-wrap: wrap;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__rendered li {
list-style: none;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__placeholder {
color: var(--body-quiet-color);
margin-top: 5px;
float: left;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__clear {
cursor: pointer;
float: right;
font-weight: bold;
margin: 5px;
position: absolute;
right: 0;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice {
background-color: var(--darkened-bg);
border: 1px solid var(--border-color);
border-radius: 4px;
cursor: default;
float: left;
margin-right: 5px;
margin-top: 5px;
padding: 0 5px;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice__remove {
color: var(--body-quiet-color);
cursor: pointer;
display: inline-block;
font-weight: bold;
margin-right: 2px;
}
.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice__remove:hover {
color: var(--body-fg);
}
.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice, .select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__placeholder, .select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-search--inline {
float: right;
}
.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
margin-left: 5px;
margin-right: auto;
}
.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove {
margin-left: 2px;
margin-right: auto;
}
.select2-container--admin-autocomplete.select2-container--focus .select2-selection--multiple {
border: solid var(--body-quiet-color) 1px;
outline: 0;
}
.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--multiple {
background-color: var(--darkened-bg);
cursor: default;
}
.select2-container--admin-autocomplete.select2-container--disabled .select2-selection__choice__remove {
display: none;
}
.select2-container--admin-autocomplete.select2-container--open.select2-container--above .select2-selection--single, .select2-container--admin-autocomplete.select2-container--open.select2-container--above .select2-selection--multiple {
border-top-left-radius: 0;
border-top-right-radius: 0;
}
.select2-container--admin-autocomplete.select2-container--open.select2-container--below .select2-selection--single, .select2-container--admin-autocomplete.select2-container--open.select2-container--below .select2-selection--multiple {
border-bottom-left-radius: 0;
border-bottom-right-radius: 0;
}
.select2-container--admin-autocomplete .select2-search--dropdown {
background: var(--darkened-bg);
}
.select2-container--admin-autocomplete .select2-search--dropdown .select2-search__field {
background: var(--body-bg);
color: var(--body-fg);
border: 1px solid var(--border-color);
border-radius: 4px;
}
.select2-container--admin-autocomplete .select2-search--inline .select2-search__field {
background: transparent;
color: var(--body-fg);
border: none;
outline: 0;
box-shadow: none;
-webkit-appearance: textfield;
}
.select2-container--admin-autocomplete .select2-results > .select2-results__options {
max-height: 200px;
overflow-y: auto;
color: var(--body-fg);
background: var(--body-bg);
}
.select2-container--admin-autocomplete .select2-results__option[role=group] {
padding: 0;
}
.select2-container--admin-autocomplete .select2-results__option[aria-disabled=true] {
color: var(--body-quiet-color);
}
.select2-container--admin-autocomplete .select2-results__option[aria-selected=true] {
background-color: var(--selected-bg);
color: var(--body-fg);
}
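/* Note: the nested-option rules below indent each deeper level of grouped results by a further 1em — the left padding grows per level while a matching negative margin cancels the padding inherited from the parent option. */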
.select2-container--admin-autocomplete .select2-results__option .select2-results__option {
padding-left: 1em;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__group {
padding-left: 0;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option {
margin-left: -1em;
padding-left: 2em;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -2em;
padding-left: 3em;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -3em;
padding-left: 4em;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -4em;
padding-left: 5em;
}
.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -5em;
padding-left: 6em;
}
.select2-container--admin-autocomplete .select2-results__option--highlighted[aria-selected] {
background-color: var(--primary);
color: var(--primary-fg);
}
.select2-container--admin-autocomplete .select2-results__group {
cursor: default;
display: block;
padding: 6px;
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/css/autocomplete.4a81fc4242d0.css/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/css/autocomplete.4a81fc4242d0.css",
"repo_id": "Django-locallibrary",
"token_count": 3217
} | 3 |
@font-face {
font-family: 'Roboto';
src: url("../fonts/Roboto-Bold-webfont.50d75e48e0a3.woff");
font-weight: 700;
font-style: normal;
}
@font-face {
font-family: 'Roboto';
src: url("../fonts/Roboto-Regular-webfont.35b07eb2f871.woff");
font-weight: 400;
font-style: normal;
}
@font-face {
font-family: 'Roboto';
src: url("../fonts/Roboto-Light-webfont.c73eb1ceba33.woff");
font-weight: 300;
font-style: normal;
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/css/fonts.168bab448fee.css/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/css/fonts.168bab448fee.css",
"repo_id": "Django-locallibrary",
"token_count": 219
} | 4 |
/* Tablets */
input[type="submit"], button {
-webkit-appearance: none;
appearance: none;
}
@media (max-width: 1024px) {
/* Basic */
html {
-webkit-text-size-adjust: 100%;
}
td, th {
padding: 10px;
font-size: 14px;
}
.small {
font-size: 12px;
}
/* Layout */
#container {
min-width: 0;
}
#content {
padding: 20px 30px 30px;
}
div.breadcrumbs {
padding: 10px 30px;
}
/* Header */
#header {
flex-direction: column;
padding: 15px 30px;
justify-content: flex-start;
}
#branding h1 {
margin: 0 0 8px;
font-size: 20px;
line-height: 1.2;
}
#user-tools {
margin: 0;
font-weight: 400;
line-height: 1.85;
text-align: left;
}
#user-tools a {
display: inline-block;
line-height: 1.4;
}
/* Dashboard */
.dashboard #content {
width: auto;
}
#content-related {
margin-right: -290px;
}
.colSM #content-related {
margin-left: -290px;
}
.colMS {
margin-right: 290px;
}
.colSM {
margin-left: 290px;
}
.dashboard .module table td a {
padding-right: 0;
}
td .changelink, td .addlink {
font-size: 13px;
}
/* Changelist */
#toolbar {
border: none;
padding: 15px;
}
#changelist-search > div {
display: flex;
flex-wrap: nowrap;
max-width: 480px;
}
#changelist-search label {
line-height: 22px;
}
#toolbar form #searchbar {
flex: 1 0 auto;
width: 0;
height: 22px;
margin: 0 10px 0 6px;
}
#toolbar form input[type=submit] {
flex: 0 1 auto;
}
#changelist-search .quiet {
width: 0;
flex: 1 0 auto;
margin: 5px 0 0 25px;
}
#changelist .actions {
display: flex;
flex-wrap: wrap;
padding: 15px 0;
}
#changelist .actions.selected {
border: none;
}
#changelist .actions label {
display: flex;
}
#changelist .actions select {
background: var(--body-bg);
}
#changelist .actions .button {
min-width: 48px;
margin: 0 10px;
}
#changelist .actions span.all,
#changelist .actions span.clear,
#changelist .actions span.question,
#changelist .actions span.action-counter {
font-size: 11px;
margin: 0 10px 0 0;
}
#changelist-filter {
flex-basis: 200px;
}
.change-list .filtered .results,
.change-list .filtered .paginator,
.filtered #toolbar,
.filtered .actions,
#changelist .paginator {
border-top-color: var(--hairline-color); /* XXX Is this used at all? */
}
#changelist .results + .paginator {
border-top: none;
}
/* Forms */
label {
font-size: 14px;
}
.form-row input[type=text],
.form-row input[type=password],
.form-row input[type=email],
.form-row input[type=url],
.form-row input[type=tel],
.form-row input[type=number],
.form-row textarea,
.form-row select,
.form-row .vTextField {
box-sizing: border-box;
margin: 0;
padding: 6px 8px;
min-height: 36px;
font-size: 14px;
}
.form-row select {
height: 36px;
}
.form-row select[multiple] {
height: auto;
min-height: 0;
}
fieldset .fieldBox {
float: none;
margin: 0 -10px;
padding: 0 10px;
}
fieldset .fieldBox + .fieldBox {
margin-top: 10px;
padding-top: 10px;
border-top: 1px solid var(--hairline-color);
}
textarea {
max-width: 100%;
max-height: 120px;
}
.aligned label {
padding-top: 6px;
}
.aligned .related-lookup,
.aligned .datetimeshortcuts,
.aligned .related-lookup + strong {
align-self: center;
margin-left: 15px;
}
form .aligned ul.radiolist {
margin-left: 2px;
}
/* Related widget */
.related-widget-wrapper {
float: none;
}
.related-widget-wrapper-link + .selector {
max-width: calc(100% - 30px);
margin-right: 15px;
}
select + .related-widget-wrapper-link,
.related-widget-wrapper-link + .related-widget-wrapper-link {
margin-left: 10px;
}
/* Selector */
.selector {
display: flex;
width: 100%;
}
.selector .selector-filter {
display: flex;
align-items: center;
}
.selector .selector-filter label {
margin: 0 8px 0 0;
}
.selector .selector-filter input {
width: auto;
min-height: 0;
flex: 1 1;
}
.selector-available, .selector-chosen {
width: auto;
flex: 1 1;
display: flex;
flex-direction: column;
}
.selector select {
width: 100%;
flex: 1 0 auto;
margin-bottom: 5px;
}
.selector ul.selector-chooser {
width: 26px;
height: 52px;
padding: 2px 0;
margin: auto 15px;
border-radius: 20px;
transform: translateY(-10px);
}
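/* Note: the add/remove arrows appear to be drawn from a shared icon sprite defined elsewhere in the admin styles; the background-position offsets below and in the .stacked/.active variants select the individual arrow states. */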
.selector-add, .selector-remove {
width: 20px;
height: 20px;
background-size: 20px auto;
}
.selector-add {
background-position: 0 -120px;
}
.selector-remove {
background-position: 0 -80px;
}
a.selector-chooseall, a.selector-clearall {
align-self: center;
}
.stacked {
flex-direction: column;
max-width: 480px;
}
.stacked > * {
flex: 0 1 auto;
}
.stacked select {
margin-bottom: 0;
}
.stacked .selector-available, .stacked .selector-chosen {
width: auto;
}
.stacked ul.selector-chooser {
width: 52px;
height: 26px;
padding: 0 2px;
margin: 15px auto;
transform: none;
}
.stacked .selector-chooser li {
padding: 3px;
}
.stacked .selector-add, .stacked .selector-remove {
background-size: 20px auto;
}
.stacked .selector-add {
background-position: 0 -40px;
}
.stacked .active.selector-add {
background-position: 0 -40px;
}
.active.selector-add:focus, .active.selector-add:hover {
background-position: 0 -140px;
}
.stacked .active.selector-add:focus, .stacked .active.selector-add:hover {
background-position: 0 -60px;
}
.stacked .selector-remove {
background-position: 0 0;
}
.stacked .active.selector-remove {
background-position: 0 0;
}
.active.selector-remove:focus, .active.selector-remove:hover {
background-position: 0 -100px;
}
.stacked .active.selector-remove:focus, .stacked .active.selector-remove:hover {
background-position: 0 -20px;
}
.help-tooltip, .selector .help-icon {
display: none;
}
form .form-row p.datetime {
width: 100%;
}
.datetime input {
width: 50%;
max-width: 120px;
}
.datetime span {
font-size: 13px;
}
.datetime .timezonewarning {
display: block;
font-size: 11px;
color: var(--body-quiet-color);
}
.datetimeshortcuts {
color: var(--border-color); /* XXX Redundant, .datetime span also sets #ccc */
}
.form-row .datetime input.vDateField, .form-row .datetime input.vTimeField {
width: 75%;
}
.inline-group {
overflow: auto;
}
/* Messages */
ul.messagelist li {
padding-left: 55px;
background-position: 30px 12px;
}
ul.messagelist li.error {
background-position: 30px 12px;
}
ul.messagelist li.warning {
background-position: 30px 14px;
}
/* Login */
.login #header {
padding: 15px 20px;
}
.login #branding h1 {
margin: 0;
}
/* GIS */
div.olMap {
max-width: calc(100vw - 30px);
max-height: 300px;
}
.olMap + .clear_features {
display: block;
margin-top: 10px;
}
/* Docs */
.module table.xfull {
width: 100%;
}
pre.literal-block {
overflow: auto;
}
}
/* Mobile */
@media (max-width: 767px) {
/* Layout */
#header, #content, #footer {
padding: 15px;
}
#footer:empty {
padding: 0;
}
div.breadcrumbs {
padding: 10px 15px;
}
/* Dashboard */
.colMS, .colSM {
margin: 0;
}
#content-related, .colSM #content-related {
width: 100%;
margin: 0;
}
#content-related .module {
margin-bottom: 0;
}
#content-related .module h2 {
padding: 10px 15px;
font-size: 16px;
}
/* Changelist */
#changelist {
align-items: stretch;
flex-direction: column;
}
#toolbar {
padding: 10px;
}
#changelist-filter {
margin-left: 0;
}
#changelist .actions label {
flex: 1 1;
}
#changelist .actions select {
flex: 1 0;
width: 100%;
}
#changelist .actions span {
flex: 1 0 100%;
}
#changelist-filter {
position: static;
width: auto;
margin-top: 30px;
}
.object-tools {
float: none;
margin: 0 0 15px;
padding: 0;
overflow: hidden;
}
.object-tools li {
height: auto;
margin-left: 0;
}
.object-tools li + li {
margin-left: 15px;
}
/* Forms */
.form-row {
padding: 15px 0;
}
.aligned .form-row,
.aligned .form-row > div {
display: flex;
flex-wrap: wrap;
max-width: 100vw;
}
.aligned .form-row > div {
width: calc(100vw - 30px);
}
textarea {
max-width: none;
}
.vURLField {
width: auto;
}
fieldset .fieldBox + .fieldBox {
margin-top: 15px;
padding-top: 15px;
}
fieldset.collapsed .form-row {
display: none;
}
.aligned label {
width: 100%;
padding: 0 0 10px;
}
.aligned label:after {
max-height: 0;
}
.aligned .form-row input,
.aligned .form-row select,
.aligned .form-row textarea {
flex: 1 1 auto;
max-width: 100%;
}
.aligned .checkbox-row {
align-items: center;
}
.aligned .checkbox-row input {
flex: 0 1 auto;
margin: 0;
}
.aligned .vCheckboxLabel {
flex: 1 0;
padding: 1px 0 0 5px;
}
.aligned label + p,
.aligned label + div.help,
.aligned label + div.readonly {
padding: 0;
margin-left: 0;
}
.aligned p.file-upload {
margin-left: 0;
font-size: 13px;
}
span.clearable-file-input {
margin-left: 15px;
}
span.clearable-file-input label {
font-size: 13px;
padding-bottom: 0;
}
.aligned .timezonewarning {
flex: 1 0 100%;
margin-top: 5px;
}
form .aligned .form-row div.help {
width: 100%;
margin: 5px 0 0;
padding: 0;
}
form .aligned ul {
margin-left: 0;
padding-left: 0;
}
form .aligned ul.radiolist {
margin-right: 15px;
margin-bottom: -3px;
}
form .aligned ul.radiolist:not(.inline) li + li {
margin-top: 5px;
}
/* Related widget */
.related-widget-wrapper {
width: 100%;
display: flex;
align-items: flex-start;
}
.related-widget-wrapper .selector {
order: 1;
}
.related-widget-wrapper > a {
order: 2;
}
.related-widget-wrapper .radiolist ~ a {
align-self: flex-end;
}
.related-widget-wrapper > select ~ a {
align-self: center;
}
select + .related-widget-wrapper-link,
.related-widget-wrapper-link + .related-widget-wrapper-link {
margin-left: 15px;
}
/* Selector */
.selector {
flex-direction: column;
}
.selector > * {
float: none;
}
.selector-available, .selector-chosen {
margin-bottom: 0;
flex: 1 1 auto;
}
.selector select {
max-height: 96px;
}
.selector ul.selector-chooser {
display: block;
float: none;
width: 52px;
height: 26px;
padding: 0 2px;
margin: 15px auto 20px;
transform: none;
}
.selector ul.selector-chooser li {
float: left;
}
.selector-remove {
background-position: 0 0;
}
.active.selector-remove:focus, .active.selector-remove:hover {
background-position: 0 -20px;
}
.selector-add {
background-position: 0 -40px;
}
.active.selector-add:focus, .active.selector-add:hover {
background-position: 0 -60px;
}
/* Inlines */
.inline-group[data-inline-type="stacked"] .inline-related {
border: 1px solid var(--hairline-color);
border-radius: 4px;
margin-top: 15px;
overflow: auto;
}
.inline-group[data-inline-type="stacked"] .inline-related > * {
box-sizing: border-box;
}
.inline-group[data-inline-type="stacked"] .inline-related .module {
padding: 0 10px;
}
.inline-group[data-inline-type="stacked"] .inline-related .module .form-row {
border-top: 1px solid var(--hairline-color);
border-bottom: none;
}
.inline-group[data-inline-type="stacked"] .inline-related .module .form-row:first-child {
border-top: none;
}
.inline-group[data-inline-type="stacked"] .inline-related h3 {
padding: 10px;
border-top-width: 0;
border-bottom-width: 2px;
display: flex;
flex-wrap: wrap;
align-items: center;
}
.inline-group[data-inline-type="stacked"] .inline-related h3 .inline_label {
margin-right: auto;
}
.inline-group[data-inline-type="stacked"] .inline-related h3 span.delete {
float: none;
flex: 1 1 100%;
margin-top: 5px;
}
.inline-group[data-inline-type="stacked"] .aligned .form-row > div:not([class]) {
width: 100%;
}
.inline-group[data-inline-type="stacked"] .aligned label {
width: 100%;
}
.inline-group[data-inline-type="stacked"] div.add-row {
margin-top: 15px;
border: 1px solid var(--hairline-color);
border-radius: 4px;
}
.inline-group div.add-row,
.inline-group .tabular tr.add-row td {
padding: 0;
}
.inline-group div.add-row a,
.inline-group .tabular tr.add-row td a {
display: block;
padding: 8px 10px 8px 26px;
background-position: 8px 9px;
}
/* Submit row */
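/* Note: on narrow screens the submit row stacks its buttons vertically; "order: 4" below pushes the delete link to the bottom of the stack. */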
.submit-row {
padding: 10px 10px 0;
margin: 0 0 15px;
display: flex;
flex-direction: column;
}
.submit-row > * {
width: 100%;
}
.submit-row input, .submit-row input.default, .submit-row a, .submit-row a.closelink {
float: none;
margin: 0 0 10px;
text-align: center;
}
.submit-row a.closelink {
padding: 10px 0;
}
.submit-row p.deletelink-box {
order: 4;
}
/* Messages */
ul.messagelist li {
padding-left: 40px;
background-position: 15px 12px;
}
ul.messagelist li.error {
background-position: 15px 12px;
}
ul.messagelist li.warning {
background-position: 15px 14px;
}
/* Paginator */
.paginator .this-page, .paginator a:link, .paginator a:visited {
padding: 4px 10px;
}
/* Login */
body.login {
padding: 0 15px;
}
.login #container {
width: auto;
max-width: 480px;
margin: 50px auto;
}
.login #header,
.login #content {
padding: 15px;
}
.login #content-main {
float: none;
}
.login .form-row {
padding: 0;
}
.login .form-row + .form-row {
margin-top: 15px;
}
.login .form-row label {
margin: 0 0 5px;
line-height: 1.2;
}
.login .submit-row {
padding: 15px 0 0;
}
.login br {
display: none;
}
.login .submit-row input {
margin: 0;
text-transform: uppercase;
}
.errornote {
margin: 0 0 20px;
padding: 8px 12px;
font-size: 13px;
}
/* Calendar and clock */
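/* Note: the calendar and clock popups are centered with position: fixed plus translate(-50%, -50%), while the ::before pseudo-element supplies a full-viewport dimmed backdrop behind them. */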
.calendarbox, .clockbox {
position: fixed !important;
top: 50% !important;
left: 50% !important;
transform: translate(-50%, -50%);
margin: 0;
border: none;
overflow: visible;
}
.calendarbox:before, .clockbox:before {
content: '';
position: fixed;
top: 50%;
left: 50%;
width: 100vw;
height: 100vh;
background: rgba(0, 0, 0, 0.75);
transform: translate(-50%, -50%);
}
.calendarbox > *, .clockbox > * {
position: relative;
z-index: 1;
}
.calendarbox > div:first-child {
z-index: 2;
}
.calendarbox .calendar, .clockbox h2 {
border-radius: 4px 4px 0 0;
overflow: hidden;
}
.calendarbox .calendar-cancel, .clockbox .calendar-cancel {
border-radius: 0 0 4px 4px;
overflow: hidden;
}
.calendar-shortcuts {
padding: 10px 0;
font-size: 12px;
line-height: 12px;
}
.calendar-shortcuts a {
margin: 0 4px;
}
.timelist a {
background: var(--body-bg);
padding: 4px;
}
.calendar-cancel {
padding: 8px 10px;
}
.clockbox h2 {
padding: 8px 15px;
}
.calendar caption {
padding: 10px;
}
.calendarbox .calendarnav-previous, .calendarbox .calendarnav-next {
z-index: 1;
top: 10px;
}
/* History */
table#change-history tbody th, table#change-history tbody td {
font-size: 13px;
word-break: break-word;
}
table#change-history tbody th {
width: auto;
}
/* Docs */
table.model tbody th, table.model tbody td {
font-size: 13px;
word-break: break-word;
}
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/css/responsive.b128bdf0edef.css/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/css/responsive.b128bdf0edef.css",
"repo_id": "Django-locallibrary",
"token_count": 9085
} | 5 |