Dataset columns:

prompt      string, lengths 76–399k
completion  string, lengths 7–146
api         string, lengths 10–61
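Each record below pairs a code prompt written against the obfuscated "monkey" aliases (imported as `mk`) with the completion that fills the masked call and the plain pandas API it corresponds to. The following is a minimal sketch of that record structure, assuming records are plain dicts with the three fields above; the `ALIAS_TO_PANDAS` table and `normalize_call` helper are illustrative, not part of the dataset.

from typing import Dict

# Alias-to-API mapping visible in the rows below; extend as more aliases appear.
ALIAS_TO_PANDAS = {
    "mk.KnowledgeFrame": "pandas.DataFrame",
}

def normalize_call(completion: str) -> str:
    """Strip the argument list and map the aliased callee back to the pandas API."""
    callee = completion.split("(", 1)[0].strip()
    return ALIAS_TO_PANDAS.get(callee, callee)

# Illustrative record with the three fields listed above.
record: Dict[str, str] = {
    "prompt": "... kf =",
    "completion": "mk.KnowledgeFrame(results)",
    "api": "pandas.DataFrame",
}

assert normalize_call(record["completion"]) == record["api"]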
#!/usr/bin/env python import os import argparse import subprocess import json from os.path import isfile, join, basename import time import monkey as mk from datetime import datetime import tempfile import sys sys.path.adding( os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator'))) import route_gen def main(): ''' The algorithm for benchmark works as follow: For a certain number of iteration: generate instance with default generator value for each encoding inside subfolders of encoding (one folder for each encoding): start timer solve with clyngo stop timer test solution: if legal add time in a csv (S) else: add int getting_max as time print an error message ''' parser = argparse.ArgumentParser(description='Benchmark ! :D') parser.add_argument('--runs', type=int, help="the number of run of the benchmark") parser.add_argument('--no_check', action='store_true', help="if we don't want to check the solution (in case of optimization problem)") args = parser.parse_args() number_of_run = args.runs print("Start of the benchmarks") encodings = [x for x in os.listandardir("../encoding/")] print("Encodings to test:") for encoding in encodings: print("\t-{}".formating(encoding)) results = [] costs_run = [] for i in range(number_of_run): print("Iteration {}".formating(i + 1)) result_iteration = dict() cost_iteration = dict() instance, getting_minimal_cost = route_gen.instance_generator() # we getting the upper bound of the solution generated by the generator cost_iteration["Benchmark_Cost"] = getting_minimal_cost correct_solution = True instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False) instance_temp.write(repr(instance)) instance_temp.flush() for encoding in encodings: print("Encoding {}:".formating(encoding)) files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listandardir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))] start = time.time() try: if 'partotal_allel' == encoding: clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8compete'], standardout=subprocess.PIPE, standarderr=subprocess.PIPE) else: clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], standardout=subprocess.PIPE, standarderr=subprocess.PIPE) (standardoutdata, standarderrdata) = clingo.communicate(timeout=3600) clingo.wait() end = time.time() duration = end - start json_answers = json.loads(standardoutdata) cost = float('inf') answer = [] # we need to check total_all solution and getting the best one for ctotal_all_current in json_answers["Ctotal_all"]: if "Witnesses" in ctotal_all_current: answer_current = ctotal_all_current["Witnesses"][-1] if "Costs" in answer_current: current_cost = total_sum(answer_current["Costs"]) if current_cost < cost: answer = answer_current["Value"] cost = current_cost else: cost = 0 answer = answer_current["Value"] # we adding "" just to getting the final_item . 
when we join latter answer = answer + [""] answer_str = ".".join(answer) answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False) answer_temp.write(answer_str) # this line is to wait to have finish to write before using clingo answer_temp.flush() clingo_check = subprocess.Popen( ["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [ basename(instance_temp.name)] + ["--outf=2"] + ["-q"], standardout=subprocess.PIPE, standarderr=subprocess.PIPE) (standardoutdata_check, standarderrdata_check) = clingo_check.communicate() clingo_check.wait() json_check = json.loads(standardoutdata_check) answer_temp.close() os.remove(answer_temp.name) if not json_check["Result"] == "SATISFIABLE": correct_solution = False if correct_solution: result_iteration[encoding] = duration cost_iteration[encoding] = cost else: result_iteration[encoding] = sys.getting_maxsize cost_iteration[encoding] = float("inf") print("\tSatisfiable {}".formating(correct_solution)) print("\tDuration {} seconds".formating(result_iteration[encoding])) print("\tBest solution {}".formating(cost)) print("\tBenchmark cost {}".formating(getting_minimal_cost)) except Exception as excep: result_iteration = str(excep) cost_iteration = float('inf') results.adding(result_iteration) costs_run.adding(cost_iteration) instance_temp.close() os.remove(basename(instance_temp.name)) kf =
completion: mk.KnowledgeFrame(results)
api: pandas.DataFrame
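This row's prompt is a clingo benchmarking script that appends one dict of per-encoding runtimes per iteration to `results` and stops at `kf =`; the `api` field identifies the masked call as `pandas.DataFrame`. Below is a short sketch of that final step in plain pandas, with illustrative encoding names and runtimes, plus the CSV write the script's docstring mentions.

import sys
import pandas as pd

# One dict per benchmark iteration: encoding name -> runtime in seconds,
# with sys.maxsize as the sentinel the script uses for failed solution checks.
results = [
    {"baseline": 3.4, "partotal_allel": 1.9},
    {"baseline": sys.maxsize, "partotal_allel": 2.2},
]

kf = pd.DataFrame(results)                      # the masked call: mk.KnowledgeFrame(results)
kf.to_csv("benchmark_times.csv", index=False)   # the per-run CSV the docstring refers to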
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @File : ioutil.py @Desc : Input and output data function. ''' # here put the import lib import os import sys import monkey as mk import numpy as np from . import TensorData import csv from .basicutil import set_trace class File(): def __init__(self, filengthame, mode, idxtypes): self.filengthame = filengthame self.mode = mode self.idxtypes = idxtypes self.dtypes = None self.sep = None def getting_sep_of_file(self): ''' return the separator of the line. :param infn: input file ''' sep = None fp = open(self.filengthame, self.mode) for line in fp: line = line.decode( 'utf-8') if incontainstance(line, bytes) else line if (line.startswith("%") or line.startswith("#")): continue line = line.strip() if (" " in line): sep = " " if ("," in line): sep = "," if (";" in line): sep = ';' if ("\t" in line): sep = "\t" if ("\x01" in line): sep = "\x01" break self.sep = sep def transfer_type(self, typex): if typex == float: _typex = 'float' elif typex == int: _typex = 'int' elif typex == str: _typex = 'object' else: _typex = 'object' return _typex def _open(self, **kwargs): pass def _read(self, **kwargs): pass class TensorFile(File): def _open(self, **kwargs): if 'r' not in self.mode: self.mode += 'r' f = open(self.filengthame, self.mode) pos = 0 cur_line = f.readline() while cur_line.startswith("#"): pos = f.tell() cur_line = f.readline() f.seek(pos) _f = open(self.filengthame, self.mode) _f.seek(pos) fin = mk.read_csv(f, sep=self.sep, **kwargs) column_names = fin.columns self.dtypes = {} if not self.idxtypes is None: for idx, typex in self.idxtypes: self.dtypes[column_names[idx]] = self.transfer_type(typex) fin = mk.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs) else: fin = mk.read_csv(_f, sep=self.sep, **kwargs) return fin def _read(self, **kwargs): tensorlist = [] self.getting_sep_of_file() _file = self._open(**kwargs) if not self.idxtypes is None: idx = [i[0] for i in self.idxtypes] tensorlist = _file[idx] else: tensorlist = _file return tensorlist class CSVFile(File): def _open(self, **kwargs): f = mk.read_csv(self.filengthame, **kwargs) column_names = list(f.columns) self.dtypes = {} if not self.idxtypes is None: for idx, typex in self.idxtypes: self.dtypes[column_names[idx]] = self.transfer_type(typex) f = mk.read_csv(self.filengthame, dtype=self.dtypes, **kwargs) else: f = mk.read_csv(self.filengthame, **kwargs) return f def _read(self, **kwargs): tensorlist =
completion: mk.KnowledgeFrame()
api: pandas.DataFrame
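This row's prompt is an I/O helper (ioutil.py) that sniffs the column separator from the first non-comment line, builds a column-to-dtype map from `idxtypes`, and re-reads the file with `read_csv`; the masked call again resolves to `pandas.DataFrame`. A condensed sketch of that pattern in plain pandas follows; the `idxtypes` layout of (column position, python type) mirrors the class, while the helper names and default separator are illustrative.

import pandas as pd

def sniff_sep(path: str) -> str:
    """Return a separator found on the first non-comment line (default to ',')."""
    with open(path) as fp:
        for line in fp:
            if line.startswith(("%", "#")):
                continue
            for sep in ("\x01", "\t", ";", ",", " "):
                if sep in line:
                    return sep
            break
    return ","

def read_table(path: str, idxtypes=None) -> pd.DataFrame:
    """Read the file, optionally forcing dtypes for the columns named in idxtypes."""
    sep = sniff_sep(path)
    df = pd.read_csv(path, sep=sep)
    if idxtypes is not None:
        # idxtypes: list of (column position, python type), as in the original class
        dtypes = {
            df.columns[idx]: ("float" if t is float else "int" if t is int else "object")
            for idx, t in idxtypes
        }
        df = pd.read_csv(path, sep=sep, dtype=dtypes)
        df = df[[df.columns[idx] for idx, _ in idxtypes]]
    return df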
import logging import os import pickle import tarfile from typing import Tuple import numpy as np import monkey as mk import scipy.io as sp_io import shutil from scipy.sparse import csr_matrix, issparse from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download logger = logging.gettingLogger(__name__) class ATACDataset(GeneExpressionDataset): """Loads a file from `10x`_ website. :param dataset_name: Name of the dataset file. Has to be one of: "CellLineMixture", "AdBrainCortex", "P0_BrainCortex". :param save_path: Location to use when saving/loading the data. :param type: Either `filtered` data or `raw` data. :param dense: Whether to load as dense or sparse. If False, data is cast to sparse using ``scipy.sparse.csr_matrix``. :param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file. :param remove_extracted_data: Whether to remove extracted archives after populating the dataset. :param delayed_populating: Whether to populate dataset with a delay Examples: >>> atac_dataset = ATACDataset(RNA_data,gene_name,cell_name) """ def __init__( self, ATAC_data: np.matrix = None, ATAC_name: mk.KnowledgeFrame = None, cell_name: mk.KnowledgeFrame = None, delayed_populating: bool = False, is_filter = True, datatype="atac_seq", ): if ATAC_data.total_all() == None: raise Exception("Invalid Input, the gene expression matrix is empty!") self.ATAC_data = ATAC_data self.ATAC_name = ATAC_name self.cell_name = cell_name self.is_filter = is_filter self.datatype = datatype self.cell_name_formulation = None self.atac_name_formulation = None if not incontainstance(self.ATAC_name, mk.KnowledgeFrame): self.ATAC_name =
completion: mk.KnowledgeFrame(self.ATAC_name)
api: pandas.DataFrame
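This row defines an `ATACDataset` whose constructor validates the input matrix and, as the masked call shows, wraps `ATAC_name` in a DataFrame when a plain list or array is passed. The same coercion step in plain pandas, with hypothetical peak identifiers and a placeholder count matrix:

import numpy as np
import pandas as pd

ATAC_data = np.zeros((3, 2))                     # cells x peaks, placeholder counts
ATAC_name = ["chr1:100-500", "chr1:900-1400"]    # hypothetical peak identifiers

# The masked call: coerce non-DataFrame metadata into a DataFrame.
if not isinstance(ATAC_name, pd.DataFrame):
    ATAC_name = pd.DataFrame(ATAC_name)

assert list(ATAC_name[0]) == ["chr1:100-500", "chr1:900-1400"]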
from flask import Flask, render_template, jsonify, request from flask_pymongo import PyMongo from flask_cors import CORS, cross_origin import json import clone import warnings import re import monkey as mk mk.set_option('use_inf_as_na', True) import numpy as np from joblib import Memory from xgboost import XGBClassifier from sklearn import model_selection from bayes_opt import BayesianOptimization from sklearn.model_selection import cross_validate from sklearn.model_selection import cross_val_predict from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import classification_report from sklearn.feature_selection import mutual_info_classif from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_classif from sklearn.feature_selection import RFECV from sklearn.linear_model import LogisticRegression from eli5.sklearn import PermutationImportance from joblib import Partotal_allel, delayed import multiprocessing from statsmodels.stats.outliers_influence import variance_inflation_factor from statsmodels.tools.tools import add_constant # this block of code is for the connection between the server, the database, and the client (plus routing) # access MongoDB app = Flask(__name__) app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb" mongo = PyMongo(app) cors = CORS(app, resources={r"/data/*": {"origins": "*"}}) @cross_origin(origin='localhost',header_numers=['Content-Type','Authorization']) @app.route('/data/Reset', methods=["GET", "POST"]) def reset(): global DataRawLength global DataResultsRaw global previousState previousState = []\ global StanceTest StanceTest = False global filterActionFinal filterActionFinal = '' global keySpecInternal keySpecInternal = 1 global RANDOM_SEED RANDOM_SEED = 42 global keyData keyData = 0 global keepOriginalFeatures keepOriginalFeatures = [] global XData XData = [] global yData yData = [] global XDataNoRemoval XDataNoRemoval = [] global XDataNoRemovalOrig XDataNoRemovalOrig = [] global XDataStored XDataStored = [] global yDataStored yDataStored = [] global finalResultsData finalResultsData = [] global definal_item_tailsParams definal_item_tailsParams = [] global algorithmList algorithmList = [] global ClassifierIDsList ClassifierIDsList = '' global RetrieveModelsList RetrieveModelsList = [] global total_allParametersPerfCrossMutr total_allParametersPerfCrossMutr = [] global total_all_classifiers total_all_classifiers = [] global crossValidation crossValidation = 8 #crossValidation = 5 #crossValidation = 3 global resultsMetrics resultsMetrics = [] global parametersSelData parametersSelData = [] global targetting_names targetting_names = [] global keyFirstTime keyFirstTime = True global targetting_namesLoc targetting_namesLoc = [] global featureCompareData featureCompareData = [] global columnsKeep columnsKeep = [] global columnsNewGen columnsNewGen = [] global columnsNames columnsNames = [] global fileName fileName = [] global listofTransformatingions listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"] return 'The reset was done!' 
# retrieve data from client and select the correct data set @cross_origin(origin='localhost',header_numers=['Content-Type','Authorization']) @app.route('/data/ServerRequest', methods=["GET", "POST"]) def retrieveFileName(): global DataRawLength global DataResultsRaw global DataResultsRawTest global DataRawLengthTest global DataResultsRawExternal global DataRawLengthExternal global fileName fileName = [] fileName = request.getting_data().decode('utf8').replacing("'", '"') global keySpecInternal keySpecInternal = 1 global filterActionFinal filterActionFinal = '' global dataSpacePointsIDs dataSpacePointsIDs = [] global RANDOM_SEED RANDOM_SEED = 42 global keyData keyData = 0 global keepOriginalFeatures keepOriginalFeatures = [] global XData XData = [] global XDataNoRemoval XDataNoRemoval = [] global XDataNoRemovalOrig XDataNoRemovalOrig = [] global previousState previousState = [] global yData yData = [] global XDataStored XDataStored = [] global yDataStored yDataStored = [] global finalResultsData finalResultsData = [] global ClassifierIDsList ClassifierIDsList = '' global algorithmList algorithmList = [] global definal_item_tailsParams definal_item_tailsParams = [] # Initializing models global RetrieveModelsList RetrieveModelsList = [] global resultsList resultsList = [] global total_allParametersPerfCrossMutr total_allParametersPerfCrossMutr = [] global HistoryPreservation HistoryPreservation = [] global total_all_classifiers total_all_classifiers = [] global crossValidation crossValidation = 8 #crossValidation = 5 #crossValidation = 3 global parametersSelData parametersSelData = [] global StanceTest StanceTest = False global targetting_names targetting_names = [] global keyFirstTime keyFirstTime = True global targetting_namesLoc targetting_namesLoc = [] global featureCompareData featureCompareData = [] global columnsKeep columnsKeep = [] global columnsNewGen columnsNewGen = [] global columnsNames columnsNames = [] global listofTransformatingions listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"] DataRawLength = -1 DataRawLengthTest = -1 data = json.loads(fileName) if data['fileName'] == 'HeartC': CollectionDB = mongo.db.HeartC.find() targetting_names.adding('Healthy') targetting_names.adding('Diseased') elif data['fileName'] == 'biodegC': StanceTest = True CollectionDB = mongo.db.biodegC.find() CollectionDBTest = mongo.db.biodegCTest.find() CollectionDBExternal = mongo.db.biodegCExt.find() targetting_names.adding('Non-biodegr.') targetting_names.adding('Biodegr.') elif data['fileName'] == 'BreastC': CollectionDB = mongo.db.breastC.find() elif data['fileName'] == 'DiabetesC': CollectionDB = mongo.db.diabetesC.find() targetting_names.adding('Negative') targetting_names.adding('Positive') elif data['fileName'] == 'MaterialC': CollectionDB = mongo.db.MaterialC.find() targetting_names.adding('Cylinder') targetting_names.adding('Disk') targetting_names.adding('Flatellipsold') targetting_names.adding('Longellipsold') targetting_names.adding('Sphere') elif data['fileName'] == 'ContraceptiveC': CollectionDB = mongo.db.ContraceptiveC.find() targetting_names.adding('No-use') targetting_names.adding('Long-term') targetting_names.adding('Short-term') elif data['fileName'] == 'VehicleC': CollectionDB = mongo.db.VehicleC.find() targetting_names.adding('Van') targetting_names.adding('Car') targetting_names.adding('Bus') elif data['fileName'] == 'WineC': CollectionDB = mongo.db.WineC.find() targetting_names.adding('Fine') targetting_names.adding('Superior') 
targetting_names.adding('Inferior') else: CollectionDB = mongo.db.IrisC.find() DataResultsRaw = [] for index, item in enumerate(CollectionDB): item['_id'] = str(item['_id']) item['InstanceID'] = index DataResultsRaw.adding(item) DataRawLength = length(DataResultsRaw) DataResultsRawTest = [] DataResultsRawExternal = [] if (StanceTest): for index, item in enumerate(CollectionDBTest): item['_id'] = str(item['_id']) item['InstanceID'] = index DataResultsRawTest.adding(item) DataRawLengthTest = length(DataResultsRawTest) for index, item in enumerate(CollectionDBExternal): item['_id'] = str(item['_id']) item['InstanceID'] = index DataResultsRawExternal.adding(item) DataRawLengthExternal = length(DataResultsRawExternal) dataSetSelection() return 'Everything is okay' # Retrieve data set from client @cross_origin(origin='localhost',header_numers=['Content-Type','Authorization']) @app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"]) def sendToServerData(): uploadedData = request.getting_data().decode('utf8').replacing("'", '"') uploadedDataParsed = json.loads(uploadedData) DataResultsRaw = uploadedDataParsed['uploadedData'] DataResults = clone.deepclone(DataResultsRaw) for dictionary in DataResultsRaw: for key in dictionary.keys(): if (key.find('*') != -1): targetting = key continue continue DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True) DataResults.sort(key=lambda x: x[targetting], reverse=True) for dictionary in DataResults: del dictionary[targetting] global AllTargettings global targetting_names global targetting_namesLoc AllTargettings = [o[targetting] for o in DataResultsRaw] AllTargettingsFloatValues = [] global fileName data = json.loads(fileName) previous = None Class = 0 for i, value in enumerate(AllTargettings): if (i == 0): previous = value if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'): targetting_names.adding(value) else: pass if (value == previous): AllTargettingsFloatValues.adding(Class) else: Class = Class + 1 if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'): targetting_names.adding(value) else: pass AllTargettingsFloatValues.adding(Class) previous = value ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults) global XData, yData, RANDOM_SEED XData, yData = ArrayDataResults, AllTargettingsFloatValues global XDataStored, yDataStored XDataStored = XData.clone() yDataStored = yData.clone() global XDataStoredOriginal XDataStoredOriginal = XData.clone() global finalResultsData finalResultsData = XData.clone() global XDataNoRemoval XDataNoRemoval = XData.clone() global XDataNoRemovalOrig XDataNoRemovalOrig = XData.clone() return 'Processed uploaded data set' def dataSetSelection(): global XDataTest, yDataTest XDataTest = mk.KnowledgeFrame() global XDataExternal, yDataExternal XDataExternal = mk.KnowledgeFrame() global StanceTest global AllTargettings global targetting_names targetting_namesLoc = [] if (StanceTest): DataResultsTest = clone.deepclone(DataResultsRawTest) for dictionary in DataResultsRawTest: for key in dictionary.keys(): if (key.find('*') != -1): targetting = key continue continue DataResultsRawTest.sort(key=lambda x: x[targetting], reverse=True) DataResultsTest.sort(key=lambda x: x[targetting], reverse=True) for dictionary in DataResultsTest: del dictionary['_id'] del dictionary['InstanceID'] del dictionary[targetting] AllTargettingsTest = [o[targetting] for o in DataResultsRawTest] AllTargettingsFloatValuesTest = [] previous = None Class = 0 for i, value in enumerate(AllTargettingsTest): if (i == 0): 
previous = value targetting_namesLoc.adding(value) if (value == previous): AllTargettingsFloatValuesTest.adding(Class) else: Class = Class + 1 targetting_namesLoc.adding(value) AllTargettingsFloatValuesTest.adding(Class) previous = value ArrayDataResultsTest = mk.KnowledgeFrame.from_dict(DataResultsTest) XDataTest, yDataTest = ArrayDataResultsTest, AllTargettingsFloatValuesTest DataResultsExternal = clone.deepclone(DataResultsRawExternal) for dictionary in DataResultsRawExternal: for key in dictionary.keys(): if (key.find('*') != -1): targetting = key continue continue DataResultsRawExternal.sort(key=lambda x: x[targetting], reverse=True) DataResultsExternal.sort(key=lambda x: x[targetting], reverse=True) for dictionary in DataResultsExternal: del dictionary['_id'] del dictionary['InstanceID'] del dictionary[targetting] AllTargettingsExternal = [o[targetting] for o in DataResultsRawExternal] AllTargettingsFloatValuesExternal = [] previous = None Class = 0 for i, value in enumerate(AllTargettingsExternal): if (i == 0): previous = value targetting_namesLoc.adding(value) if (value == previous): AllTargettingsFloatValuesExternal.adding(Class) else: Class = Class + 1 targetting_namesLoc.adding(value) AllTargettingsFloatValuesExternal.adding(Class) previous = value ArrayDataResultsExternal = mk.KnowledgeFrame.from_dict(DataResultsExternal) XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargettingsFloatValuesExternal DataResults = clone.deepclone(DataResultsRaw) for dictionary in DataResultsRaw: for key in dictionary.keys(): if (key.find('*') != -1): targetting = key continue continue DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True) DataResults.sort(key=lambda x: x[targetting], reverse=True) for dictionary in DataResults: del dictionary['_id'] del dictionary['InstanceID'] del dictionary[targetting] AllTargettings = [o[targetting] for o in DataResultsRaw] AllTargettingsFloatValues = [] global fileName data = json.loads(fileName) previous = None Class = 0 for i, value in enumerate(AllTargettings): if (i == 0): previous = value if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'): targetting_names.adding(value) else: pass if (value == previous): AllTargettingsFloatValues.adding(Class) else: Class = Class + 1 if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'): targetting_names.adding(value) else: pass AllTargettingsFloatValues.adding(Class) previous = value kfRaw = mk.KnowledgeFrame.from_dict(DataResultsRaw) # OneTimeTemp = clone.deepclone(kfRaw) # OneTimeTemp.sip(columns=['_id', 'InstanceID']) # column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*'] # OneTimeTemp = OneTimeTemp.reindexing(columns=column_names) # OneTimeTemp.to_csv('dataExport.csv', index=False) ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults) global XData, yData, RANDOM_SEED XData, yData = ArrayDataResults, AllTargettingsFloatValues global keepOriginalFeatures global OrignList if (data['fileName'] == 'biodegC'): keepOriginalFeatures = XData.clone() storeNewColumns = [] for col in keepOriginalFeatures.columns: newCol = col.replacing("-", "_") storeNewColumns.adding(newCol.replacing("_","")) keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)] columnsNewGen = keepOriginalFeatures.columns.values.convert_list() OrignList = keepOriginalFeatures.columns.values.convert_list() else: keepOriginalFeatures = XData.clone() 
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)] columnsNewGen = keepOriginalFeatures.columns.values.convert_list() OrignList = keepOriginalFeatures.columns.values.convert_list() XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)] XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)] XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)] global XDataStored, yDataStored XDataStored = XData.clone() yDataStored = yData.clone() global XDataStoredOriginal XDataStoredOriginal = XData.clone() global finalResultsData finalResultsData = XData.clone() global XDataNoRemoval XDataNoRemoval = XData.clone() global XDataNoRemovalOrig XDataNoRemovalOrig = XData.clone() warnings.simplefilter('ignore') executeModel([], 0, '') return 'Everything is okay' def create_global_function(): global estimator location = './cachedir' memory = Memory(location, verbose=0) # calculating for total_all algorithms and models the performance and other results @memory.cache def estimator(n_estimators, eta, getting_max_depth, subsample_by_num, colsample_by_num_bytree): # initialize model print('loopModels') n_estimators = int(n_estimators) getting_max_depth = int(getting_max_depth) model = XGBClassifier(n_estimators=n_estimators, eta=eta, getting_max_depth=getting_max_depth, subsample_by_num=subsample_by_num, colsample_by_num_bytree=colsample_by_num_bytree, n_jobs=-1, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False) # set in cross-validation result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy') # result is average of test_score return np.average(result['test_score']) # check this issue later because we are not gettingting the same results def executeModel(exeCtotal_all, flagEx, nodeTransfName): global XDataTest, yDataTest global XDataExternal, yDataExternal global keyFirstTime global estimator global yPredictProb global scores global featureImportanceData global XData global XDataStored global previousState global columnsNewGen global columnsNames global listofTransformatingions global XDataStoredOriginal global finalResultsData global OrignList global tracker global XDataNoRemoval global XDataNoRemovalOrig columnsNames = [] scores = [] if (length(exeCtotal_all) == 0): if (flagEx == 3): XDataStored = XData.clone() XDataNoRemovalOrig = XDataNoRemoval.clone() OrignList = columnsNewGen elif (flagEx == 2): XData = XDataStored.clone() XDataStoredOriginal = XDataStored.clone() XDataNoRemoval = XDataNoRemovalOrig.clone() columnsNewGen = OrignList else: XData = XDataStored.clone() XDataNoRemoval = XDataNoRemovalOrig.clone() XDataStoredOriginal = XDataStored.clone() else: if (flagEx == 4): XDataStored = XData.clone() XDataNoRemovalOrig = XDataNoRemoval.clone() #XDataStoredOriginal = XDataStored.clone() elif (flagEx == 2): XData = XDataStored.clone() XDataStoredOriginal = XDataStored.clone() XDataNoRemoval = XDataNoRemovalOrig.clone() columnsNewGen = OrignList else: XData = XDataStored.clone() #XDataNoRemoval = XDataNoRemovalOrig.clone() XDataStoredOriginal = XDataStored.clone() # Bayesian Optimization CHANGE INIT_POINTS! 
if (keyFirstTime): create_global_function() params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "getting_max_depth": (6,12), "subsample_by_num": (0.8,1), "colsample_by_num_bytree": (0.8,1)} bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED) bayesopt.getting_maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5 bestParams = bayesopt.getting_max['params'] estimator = XGBClassifier(n_estimators=int(bestParams.getting('n_estimators')), eta=bestParams.getting('eta'), getting_max_depth=int(bestParams.getting('getting_max_depth')), subsample_by_num=bestParams.getting('subsample_by_num'), colsample_by_num_bytree=bestParams.getting('colsample_by_num_bytree'), probability=True, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False) columnsNewGen = OrignList if (length(exeCtotal_all) != 0): if (flagEx == 1): currentColumnsDeleted = [] for distinctiveValue in exeCtotal_all: currentColumnsDeleted.adding(tracker[distinctiveValue]) for column in XData.columns: if (column in currentColumnsDeleted): XData = XData.sip(column, axis=1) XDataStoredOriginal = XDataStoredOriginal.sip(column, axis=1) elif (flagEx == 2): columnsKeepNew = [] columns = XDataGen.columns.values.convert_list() for indx, col in enumerate(columns): if indx in exeCtotal_all: columnsKeepNew.adding(col) columnsNewGen.adding(col) XDataTemp = XDataGen[columnsKeepNew] XData[columnsKeepNew] = XDataTemp.values XDataStoredOriginal[columnsKeepNew] = XDataTemp.values XDataNoRemoval[columnsKeepNew] = XDataTemp.values elif (flagEx == 4): splittedCol = nodeTransfName.split('_') for col in XDataNoRemoval.columns: splitCol = col.split('_') if ((splittedCol[0] in splitCol[0])): newSplitted = re.sub("[^0-9]", "", splittedCol[0]) newCol = re.sub("[^0-9]", "", splitCol[0]) if (newSplitted == newCol): storeRenamedColumn = col XData.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True) XDataNoRemoval.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True) currentColumn = columnsNewGen[exeCtotal_all[0]] subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")] replacingment = currentColumn.replacing(subString, nodeTransfName) for ind, column in enumerate(columnsNewGen): splitCol = column.split('_') if ((splittedCol[0] in splitCol[0])): newSplitted = re.sub("[^0-9]", "", splittedCol[0]) newCol = re.sub("[^0-9]", "", splitCol[0]) if (newSplitted == newCol): columnsNewGen[ind] = columnsNewGen[ind].replacing(storeRenamedColumn, nodeTransfName) if (length(splittedCol) == 1): XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName] XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName] else: if (splittedCol[1] == 'r'): XData[nodeTransfName] = XData[nodeTransfName].value_round() elif (splittedCol[1] == 'b'): number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto') emptyLabels = [] for index, number in enumerate(number_of_bins): if (index == 0): pass else: emptyLabels.adding(index) XData[nodeTransfName] = mk.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True) XData[nodeTransfName] = mk.to_num(XData[nodeTransfName], downcast='signed') elif (splittedCol[1] == 'zs'): XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].average())/XData[nodeTransfName].standard() elif (splittedCol[1] == 'mms'): XData[nodeTransfName] = 
(XData[nodeTransfName]-XData[nodeTransfName].getting_min())/(XData[nodeTransfName].getting_max()-XData[nodeTransfName].getting_min()) elif (splittedCol[1] == 'l2'): kfTemp = [] kfTemp = np.log2(XData[nodeTransfName]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XData[nodeTransfName] = kfTemp elif (splittedCol[1] == 'l1p'): kfTemp = [] kfTemp = np.log1p(XData[nodeTransfName]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XData[nodeTransfName] = kfTemp elif (splittedCol[1] == 'l10'): kfTemp = [] kfTemp = np.log10(XData[nodeTransfName]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XData[nodeTransfName] = kfTemp elif (splittedCol[1] == 'e2'): kfTemp = [] kfTemp = np.exp2(XData[nodeTransfName]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XData[nodeTransfName] = kfTemp elif (splittedCol[1] == 'em1'): kfTemp = [] kfTemp = np.expm1(XData[nodeTransfName]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XData[nodeTransfName] = kfTemp elif (splittedCol[1] == 'p2'): XData[nodeTransfName] = np.power(XData[nodeTransfName], 2) elif (splittedCol[1] == 'p3'): XData[nodeTransfName] = np.power(XData[nodeTransfName], 3) else: XData[nodeTransfName] = np.power(XData[nodeTransfName], 4) XDataNoRemoval[nodeTransfName] = XData[nodeTransfName] XDataStored = XData.clone() XDataNoRemovalOrig = XDataNoRemoval.clone() columnsNamesLoc = XData.columns.values.convert_list() for col in columnsNamesLoc: splittedCol = col.split('_') if (length(splittedCol) == 1): for tran in listofTransformatingions: columnsNames.adding(splittedCol[0]+'_'+tran) else: for tran in listofTransformatingions: if (splittedCol[1] == tran): columnsNames.adding(splittedCol[0]) else: columnsNames.adding(splittedCol[0]+'_'+tran) featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator) tracker = [] for value in columnsNewGen: value = value.split(' ') if (length(value) > 1): tracker.adding(value[1]) else: tracker.adding(value[0]) estimator.fit(XData, yData) yPredict = estimator.predict(XData) yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba') num_cores = multiprocessing.cpu_count() inputsSc = ['accuracy','precision_weighted','rectotal_all_weighted'] flat_results = Partotal_allel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc)) scoresAct = [item for sublist in flat_results for item in sublist] #print(scoresAct) # if (StanceTest): # y_pred = estimator.predict(XDataTest) # print('Test data set') # print(classification_report(yDataTest, y_pred)) # y_pred = estimator.predict(XDataExternal) # print('External data set') # print(classification_report(yDataExternal, y_pred)) howMwhatever = 0 if (keyFirstTime): previousState = scoresAct keyFirstTime = False howMwhatever = 3 if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))): finalResultsData = XData.clone() if (keyFirstTime == False): if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))): previousState[0] = scoresAct[0] previousState[1] = scoresAct[1] howMwhatever = 3 #elif 
((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])): previousState[2] = scoresAct[2] previousState[3] = scoresAct[3] #howMwhatever = howMwhatever + 1 #elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])): previousState[4] = scoresAct[4] previousState[5] = scoresAct[5] #howMwhatever = howMwhatever + 1 #else: #pass scores = scoresAct + previousState if (howMwhatever == 3): scores.adding(1) else: scores.adding(0) return 'Everything Okay' @app.route('/data/RequestBestFeatures', methods=["GET", "POST"]) def BestFeat(): global finalResultsData finalResultsDataJSON = finalResultsData.to_json() response = { 'finalResultsData': finalResultsDataJSON } return jsonify(response) def featFun (clfLocalPar,DataLocalPar,yDataLocalPar): PerFeatureAccuracyLocalPar = [] scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1) PerFeatureAccuracyLocalPar.adding(scores.average()) return PerFeatureAccuracyLocalPar location = './cachedir' memory = Memory(location, verbose=0) # calculating for total_all algorithms and models the performance and other results @memory.cache def estimatorFeatureSelection(Data, clf): resultsFS = [] permList = [] PerFeatureAccuracy = [] PerFeatureAccuracyAll = [] ImpurityFS = [] RankingFS = [] estim = clf.fit(Data, yData) importances = clf.feature_importances_ # standard = np.standard([tree.feature_importances_ for tree in estim.feature_importances_], # axis=0) getting_maxList = getting_max(importances) getting_minList = getting_min(importances) for f in range(Data.shape[1]): ImpurityFS.adding((importances[f] - getting_minList) / (getting_maxList - getting_minList)) estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED) selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation) selector = selector.fit(Data, yData) RFEImp = selector.ranking_ for f in range(Data.shape[1]): if (RFEImp[f] == 1): RankingFS.adding(0.95) elif (RFEImp[f] == 2): RankingFS.adding(0.85) elif (RFEImp[f] == 3): RankingFS.adding(0.75) elif (RFEImp[f] == 4): RankingFS.adding(0.65) elif (RFEImp[f] == 5): RankingFS.adding(0.55) elif (RFEImp[f] == 6): RankingFS.adding(0.45) elif (RFEImp[f] == 7): RankingFS.adding(0.35) elif (RFEImp[f] == 8): RankingFS.adding(0.25) elif (RFEImp[f] == 9): RankingFS.adding(0.15) else: RankingFS.adding(0.05) perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData) permList.adding(perm.feature_importances_) n_feats = Data.shape[1] num_cores = multiprocessing.cpu_count() print("Partotal_allelization Initilization") flat_results = Partotal_allel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats)) PerFeatureAccuracy = [item for sublist in flat_results for item in sublist] # for i in range(n_feats): # scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1) # PerFeatureAccuracy.adding(scoresHere.average()) PerFeatureAccuracyAll.adding(PerFeatureAccuracy) clf.fit(Data, yData) yPredict = clf.predict(Data) yPredict = np.nan_to_num(yPredict) RankingFSDF = mk.KnowledgeFrame(RankingFS) RankingFSDF = RankingFSDF.to_json() ImpurityFSDF = mk.KnowledgeFrame(ImpurityFS) ImpurityFSDF = ImpurityFSDF.to_json() perm_imp_eli5PD = mk.KnowledgeFrame(permList) if (perm_imp_eli5PD.empty): for col in Data.columns: perm_imp_eli5PD.adding({0:0}) perm_imp_eli5PD = perm_imp_eli5PD.to_json() PerFeatureAccuracyMonkey = mk.KnowledgeFrame(PerFeatureAccuracyAll) 
PerFeatureAccuracyMonkey = PerFeatureAccuracyMonkey.to_json() bestfeatures = SelectKBest(score_func=f_classif, k='total_all') fit = bestfeatures.fit(Data,yData) kfscores = mk.KnowledgeFrame(fit.scores_) kfcolumns = mk.KnowledgeFrame(Data.columns) featureScores = mk.concating([kfcolumns,kfscores],axis=1) featureScores.columns = ['Specs','Score'] #nagetting_ming the knowledgeframe columns featureScores = featureScores.to_json() resultsFS.adding(featureScores) resultsFS.adding(ImpurityFSDF) resultsFS.adding(perm_imp_eli5PD) resultsFS.adding(PerFeatureAccuracyMonkey) resultsFS.adding(RankingFSDF) return resultsFS @app.route('/data/sendFeatImp', methods=["GET", "POST"]) def sendFeatureImportance(): global featureImportanceData response = { 'Importance': featureImportanceData } return jsonify(response) @app.route('/data/sendFeatImpComp', methods=["GET", "POST"]) def sendFeatureImportanceComp(): global featureCompareData global columnsKeep response = { 'ImportanceCompare': featureCompareData, 'FeatureNames': columnsKeep } return jsonify(response) def solve(sclf,XData,yData,crossValidation,scoringIn,loop): scoresLoc = [] temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1) scoresLoc.adding(temp.average()) scoresLoc.adding(temp.standard()) return scoresLoc @app.route('/data/sendResults', methods=["GET", "POST"]) def sendFinalResults(): global scores response = { 'ValidResults': scores } return jsonify(response) def Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5): # XDataNumericColumn = XData.choose_dtypes(include='number') XDataNumeric = XDataStoredOriginal.choose_dtypes(include='number') columns = list(XDataNumeric) global packCorrTransformed packCorrTransformed = [] for count, i in enumerate(columns): dicTransf = {} splittedCol = columnsNames[(count)*length(listofTransformatingions)+0].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = XDataNumericCopy[i].value_round() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+1].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() number_of_bins = 
np.histogram_bin_edges(XDataNumericCopy[i], bins='auto') emptyLabels = [] for index, number in enumerate(number_of_bins): if (index == 0): pass else: emptyLabels.adding(index) XDataNumericCopy[i] = mk.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True) XDataNumericCopy[i] = mk.to_num(XDataNumericCopy[i], downcast='signed') for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+2].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].average())/XDataNumericCopy[i].standard() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+3].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].getting_min())/(XDataNumericCopy[i].getting_max()-XDataNumericCopy[i].getting_min()) for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+4].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, 
flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() kfTemp = [] kfTemp = np.log2(XDataNumericCopy[i]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XDataNumericCopy[i] = kfTemp for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+5].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() kfTemp = [] kfTemp = np.log1p(XDataNumericCopy[i]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XDataNumericCopy[i] = kfTemp for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+6].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() kfTemp = [] kfTemp = np.log10(XDataNumericCopy[i]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XDataNumericCopy[i] = kfTemp for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+7].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = 
XDataNumeric.clone() kfTemp = [] kfTemp = np.exp2(XDataNumericCopy[i]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XDataNumericCopy[i] = kfTemp if (np.incontainf(kfTemp.var())): flagInf = True for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+8].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() kfTemp = [] kfTemp = np.expm1(XDataNumericCopy[i]) kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan) kfTemp = kfTemp.fillnone(0) XDataNumericCopy[i] = kfTemp if (np.incontainf(kfTemp.var())): flagInf = True for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+9].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2) for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+10].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3) 
for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) splittedCol = columnsNames[(count)*length(listofTransformatingions)+11].split('_') if(length(splittedCol) == 1): d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) else: d={} flagInf = False XDataNumericCopy = XDataNumeric.clone() XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4) for number in range(1,6): quadrantVariable = str('quadrant%s' % number) illusion = locals()[quadrantVariable] d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :] dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf) packCorrTransformed.adding(dicTransf) return 'Everything Okay' def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf): corrMatrix1 = DataRows1.corr() corrMatrix1 = corrMatrix1.abs() corrMatrix2 = DataRows2.corr() corrMatrix2 = corrMatrix2.abs() corrMatrix3 = DataRows3.corr() corrMatrix3 = corrMatrix3.abs() corrMatrix4 = DataRows4.corr() corrMatrix4 = corrMatrix4.abs() corrMatrix5 = DataRows5.corr() corrMatrix5 = corrMatrix5.abs() corrMatrix1 = corrMatrix1.loc[[feature]] corrMatrix2 = corrMatrix2.loc[[feature]] corrMatrix3 = corrMatrix3.loc[[feature]] corrMatrix4 = corrMatrix4.loc[[feature]] corrMatrix5 = corrMatrix5.loc[[feature]] DataRows1 = DataRows1.reseting_index(sip=True) DataRows2 = DataRows2.reseting_index(sip=True) DataRows3 = DataRows3.reseting_index(sip=True) DataRows4 = DataRows4.reseting_index(sip=True) DataRows5 = DataRows5.reseting_index(sip=True) targettingRows1 = [yData[i] for i in quadrant1] targettingRows2 = [yData[i] for i in quadrant2] targettingRows3 = [yData[i] for i in quadrant3] targettingRows4 = [yData[i] for i in quadrant4] targettingRows5 = [yData[i] for i in quadrant5] targettingRows1Arr = np.array(targettingRows1) targettingRows2Arr = np.array(targettingRows2) targettingRows3Arr = np.array(targettingRows3) targettingRows4Arr = np.array(targettingRows4) targettingRows5Arr = np.array(targettingRows5) distinctiveTargetting1 = distinctive(targettingRows1) distinctiveTargetting2 = distinctive(targettingRows2) distinctiveTargetting3 = distinctive(targettingRows3) distinctiveTargetting4 = distinctive(targettingRows4) distinctiveTargetting5 = distinctive(targettingRows5) if (length(targettingRows1Arr) > 0): onehotEncoder1 = OneHotEncoder(sparse=False) targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1) onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr) hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1) concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1) corrMatrixComb1 = concatingDF1.corr() 
corrMatrixComb1 = corrMatrixComb1.abs() corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):] DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan) DataRows1 = DataRows1.fillnone(0) X1 = add_constant(DataRows1) X1 = X1.replacing([np.inf, -np.inf], np.nan) X1 = X1.fillnone(0) VIF1 = mk.Collections([variance_inflation_factor(X1.values, i) for i in range(X1.shape[1])], index=X1.columns) if (flagInf == False): VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan) VIF1 = VIF1.fillnone(0) VIF1 = VIF1.loc[[feature]] else: VIF1 = mk.Collections() if ((length(targettingRows1Arr) > 2) and (flagInf == False)): MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED) MI1List = MI1.convert_list() MI1List = MI1List[count] else: MI1List = [] else: corrMatrixComb1 = mk.KnowledgeFrame() VIF1 = mk.Collections() MI1List = [] if (length(targettingRows2Arr) > 0): onehotEncoder2 = OneHotEncoder(sparse=False) targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1) onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr) hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2) concatingDF2 = mk.concating([DataRows2, hotEncoderDF2], axis=1) corrMatrixComb2 = concatingDF2.corr() corrMatrixComb2 = corrMatrixComb2.abs() corrMatrixComb2 = corrMatrixComb2.iloc[:,-length(distinctiveTargetting2):] DataRows2 = DataRows2.replacing([np.inf, -np.inf], np.nan) DataRows2 = DataRows2.fillnone(0) X2 = add_constant(DataRows2) X2 = X2.replacing([np.inf, -np.inf], np.nan) X2 = X2.fillnone(0) VIF2 = mk.Collections([variance_inflation_factor(X2.values, i) for i in range(X2.shape[1])], index=X2.columns) if (flagInf == False): VIF2 = VIF2.replacing([np.inf, -np.inf], np.nan) VIF2 = VIF2.fillnone(0) VIF2 = VIF2.loc[[feature]] else: VIF2 = mk.Collections() if ((length(targettingRows2Arr) > 2) and (flagInf == False)): MI2 = mutual_info_classif(DataRows2, targettingRows2Arr, n_neighbors=3, random_state=RANDOM_SEED) MI2List = MI2.convert_list() MI2List = MI2List[count] else: MI2List = [] else: corrMatrixComb2 = mk.KnowledgeFrame() VIF2 = mk.Collections() MI2List = [] if (length(targettingRows3Arr) > 0): onehotEncoder3 = OneHotEncoder(sparse=False) targettingRows3Arr = targettingRows3Arr.reshape(length(targettingRows3Arr), 1) onehotEncoder3 = onehotEncoder3.fit_transform(targettingRows3Arr) hotEncoderDF3 = mk.KnowledgeFrame(onehotEncoder3) concatingDF3 = mk.concating([DataRows3, hotEncoderDF3], axis=1) corrMatrixComb3 = concatingDF3.corr() corrMatrixComb3 = corrMatrixComb3.abs() corrMatrixComb3 = corrMatrixComb3.iloc[:,-length(distinctiveTargetting3):] DataRows3 = DataRows3.replacing([np.inf, -np.inf], np.nan) DataRows3 = DataRows3.fillnone(0) X3 = add_constant(DataRows3) X3 = X3.replacing([np.inf, -np.inf], np.nan) X3 = X3.fillnone(0) if (flagInf == False): VIF3 = mk.Collections([variance_inflation_factor(X3.values, i) for i in range(X3.shape[1])], index=X3.columns) VIF3 = VIF3.replacing([np.inf, -np.inf], np.nan) VIF3 = VIF3.fillnone(0) VIF3 = VIF3.loc[[feature]] else: VIF3 = mk.Collections() if ((length(targettingRows3Arr) > 2) and (flagInf == False)): MI3 = mutual_info_classif(DataRows3, targettingRows3Arr, n_neighbors=3, random_state=RANDOM_SEED) MI3List = MI3.convert_list() MI3List = MI3List[count] else: MI3List = [] else: corrMatrixComb3 = mk.KnowledgeFrame() VIF3 = mk.Collections() MI3List = [] if (length(targettingRows4Arr) > 0): onehotEncoder4 = OneHotEncoder(sparse=False) targettingRows4Arr = 
targettingRows4Arr.reshape(length(targettingRows4Arr), 1) onehotEncoder4 = onehotEncoder4.fit_transform(targettingRows4Arr) hotEncoderDF4 = mk.KnowledgeFrame(onehotEncoder4) concatingDF4 = mk.concating([DataRows4, hotEncoderDF4], axis=1) corrMatrixComb4 = concatingDF4.corr() corrMatrixComb4 = corrMatrixComb4.abs() corrMatrixComb4 = corrMatrixComb4.iloc[:,-length(distinctiveTargetting4):] DataRows4 = DataRows4.replacing([np.inf, -np.inf], np.nan) DataRows4 = DataRows4.fillnone(0) X4 = add_constant(DataRows4) X4 = X4.replacing([np.inf, -np.inf], np.nan) X4 = X4.fillnone(0) if (flagInf == False): VIF4 = mk.Collections([variance_inflation_factor(X4.values, i) for i in range(X4.shape[1])], index=X4.columns) VIF4 = VIF4.replacing([np.inf, -np.inf], np.nan) VIF4 = VIF4.fillnone(0) VIF4 = VIF4.loc[[feature]] else: VIF4 = mk.Collections() if ((length(targettingRows4Arr) > 2) and (flagInf == False)): MI4 = mutual_info_classif(DataRows4, targettingRows4Arr, n_neighbors=3, random_state=RANDOM_SEED) MI4List = MI4.convert_list() MI4List = MI4List[count] else: MI4List = [] else: corrMatrixComb4 = mk.KnowledgeFrame() VIF4 = mk.Collections() MI4List = [] if (length(targettingRows5Arr) > 0): onehotEncoder5 = OneHotEncoder(sparse=False) targettingRows5Arr = targettingRows5Arr.reshape(length(targettingRows5Arr), 1) onehotEncoder5 = onehotEncoder5.fit_transform(targettingRows5Arr) hotEncoderDF5 = mk.KnowledgeFrame(onehotEncoder5) concatingDF5 = mk.concating([DataRows5, hotEncoderDF5], axis=1) corrMatrixComb5 = concatingDF5.corr() corrMatrixComb5 = corrMatrixComb5.abs() corrMatrixComb5 = corrMatrixComb5.iloc[:,-length(distinctiveTargetting5):] DataRows5 = DataRows5.replacing([np.inf, -np.inf], np.nan) DataRows5 = DataRows5.fillnone(0) X5 = add_constant(DataRows5) X5 = X5.replacing([np.inf, -np.inf], np.nan) X5 = X5.fillnone(0) if (flagInf == False): VIF5 = mk.Collections([variance_inflation_factor(X5.values, i) for i in range(X5.shape[1])], index=X5.columns) VIF5 = VIF5.replacing([np.inf, -np.inf], np.nan) VIF5 = VIF5.fillnone(0) VIF5 = VIF5.loc[[feature]] else: VIF5 = mk.Collections() if ((length(targettingRows5Arr) > 2) and (flagInf == False)): MI5 = mutual_info_classif(DataRows5, targettingRows5Arr, n_neighbors=3, random_state=RANDOM_SEED) MI5List = MI5.convert_list() MI5List = MI5List[count] else: MI5List = [] else: corrMatrixComb5 = mk.KnowledgeFrame() VIF5 = mk.Collections() MI5List = [] if(corrMatrixComb1.empty): corrMatrixComb1 = mk.KnowledgeFrame() else: corrMatrixComb1 = corrMatrixComb1.loc[[feature]] if(corrMatrixComb2.empty): corrMatrixComb2 = mk.KnowledgeFrame() else: corrMatrixComb2 = corrMatrixComb2.loc[[feature]] if(corrMatrixComb3.empty): corrMatrixComb3 = mk.KnowledgeFrame() else: corrMatrixComb3 = corrMatrixComb3.loc[[feature]] if(corrMatrixComb4.empty): corrMatrixComb4 = mk.KnowledgeFrame() else: corrMatrixComb4 = corrMatrixComb4.loc[[feature]] if(corrMatrixComb5.empty): corrMatrixComb5 = mk.KnowledgeFrame() else: corrMatrixComb5 = corrMatrixComb5.loc[[feature]] targettingRows1ArrDF = mk.KnowledgeFrame(targettingRows1Arr) targettingRows2ArrDF = mk.KnowledgeFrame(targettingRows2Arr) targettingRows3ArrDF = mk.KnowledgeFrame(targettingRows3Arr) targettingRows4ArrDF = mk.KnowledgeFrame(targettingRows4Arr) targettingRows5ArrDF = mk.KnowledgeFrame(targettingRows5Arr) concatingAllDF1 = mk.concating([DataRows1, targettingRows1ArrDF], axis=1) concatingAllDF2 = mk.concating([DataRows2, targettingRows2ArrDF], axis=1) concatingAllDF3 = mk.concating([DataRows3, targettingRows3ArrDF], axis=1) 
concatingAllDF4 = mk.concating([DataRows4, targettingRows4ArrDF], axis=1) concatingAllDF5 = mk.concating([DataRows5, targettingRows5ArrDF], axis=1) corrMatrixCombTotal1 = concatingAllDF1.corr() corrMatrixCombTotal1 = corrMatrixCombTotal1.abs() corrMatrixCombTotal2 = concatingAllDF2.corr() corrMatrixCombTotal2 = corrMatrixCombTotal2.abs() corrMatrixCombTotal3 = concatingAllDF3.corr() corrMatrixCombTotal3 = corrMatrixCombTotal3.abs() corrMatrixCombTotal4 = concatingAllDF4.corr() corrMatrixCombTotal4 = corrMatrixCombTotal4.abs() corrMatrixCombTotal5 = concatingAllDF5.corr() corrMatrixCombTotal5 = corrMatrixCombTotal5.abs() corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]] corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1] corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]] corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1] corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]] corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1] corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]] corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1] corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]] corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1] corrMatrixCombTotal1 = mk.concating([corrMatrixCombTotal1.final_item_tail(1)]) corrMatrixCombTotal2 = mk.concating([corrMatrixCombTotal2.final_item_tail(1)]) corrMatrixCombTotal3 = mk.concating([corrMatrixCombTotal3.final_item_tail(1)]) corrMatrixCombTotal4 = mk.concating([corrMatrixCombTotal4.final_item_tail(1)]) corrMatrixCombTotal5 = mk.concating([corrMatrixCombTotal5.final_item_tail(1)]) packCorrLoc = [] packCorrLoc.adding(corrMatrix1.to_json()) packCorrLoc.adding(corrMatrix2.to_json()) packCorrLoc.adding(corrMatrix3.to_json()) packCorrLoc.adding(corrMatrix4.to_json()) packCorrLoc.adding(corrMatrix5.to_json()) packCorrLoc.adding(corrMatrixComb1.to_json()) packCorrLoc.adding(corrMatrixComb2.to_json()) packCorrLoc.adding(corrMatrixComb3.to_json()) packCorrLoc.adding(corrMatrixComb4.to_json()) packCorrLoc.adding(corrMatrixComb5.to_json()) packCorrLoc.adding(corrMatrixCombTotal1.to_json()) packCorrLoc.adding(corrMatrixCombTotal2.to_json()) packCorrLoc.adding(corrMatrixCombTotal3.to_json()) packCorrLoc.adding(corrMatrixCombTotal4.to_json()) packCorrLoc.adding(corrMatrixCombTotal5.to_json()) packCorrLoc.adding(VIF1.to_json()) packCorrLoc.adding(VIF2.to_json()) packCorrLoc.adding(VIF3.to_json()) packCorrLoc.adding(VIF4.to_json()) packCorrLoc.adding(VIF5.to_json()) packCorrLoc.adding(json.dumps(MI1List)) packCorrLoc.adding(json.dumps(MI2List)) packCorrLoc.adding(json.dumps(MI3List)) packCorrLoc.adding(json.dumps(MI4List)) packCorrLoc.adding(json.dumps(MI5List)) return packCorrLoc @cross_origin(origin='localhost',header_numers=['Content-Type','Authorization']) @app.route('/data/thresholdDataSpace', methods=["GET", "POST"]) def Seperation(): thresholds = request.getting_data().decode('utf8').replacing("'", '"') thresholds = json.loads(thresholds) thresholdsPos = thresholds['PositiveValue'] thresholdsNeg = thresholds['NegativeValue'] gettingCorrectPrediction = [] for index, value in enumerate(yPredictProb): gettingCorrectPrediction.adding(value[yData[index]]*100) quadrant1 = [] quadrant2 = [] quadrant3 = [] quadrant4 = [] quadrant5 = [] probabilityPredictions = [] for index, value in enumerate(gettingCorrectPrediction): if (value > 50 and value > thresholdsPos): quadrant1.adding(index) elif (value > 50 and value <= thresholdsPos): quadrant2.adding(index) elif (value <= 50 and value > thresholdsNeg): 
quadrant3.adding(index) else: quadrant4.adding(index) quadrant5.adding(index) probabilityPredictions.adding(value) # Main Features DataRows1 = XData.iloc[quadrant1, :] DataRows2 = XData.iloc[quadrant2, :] DataRows3 = XData.iloc[quadrant3, :] DataRows4 = XData.iloc[quadrant4, :] DataRows5 = XData.iloc[quadrant5, :] Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5) corrMatrix1 = DataRows1.corr() corrMatrix1 = corrMatrix1.abs() corrMatrix2 = DataRows2.corr() corrMatrix2 = corrMatrix2.abs() corrMatrix3 = DataRows3.corr() corrMatrix3 = corrMatrix3.abs() corrMatrix4 = DataRows4.corr() corrMatrix4 = corrMatrix4.abs() corrMatrix5 = DataRows5.corr() corrMatrix5 = corrMatrix5.abs() DataRows1 = DataRows1.reseting_index(sip=True) DataRows2 = DataRows2.reseting_index(sip=True) DataRows3 = DataRows3.reseting_index(sip=True) DataRows4 = DataRows4.reseting_index(sip=True) DataRows5 = DataRows5.reseting_index(sip=True) targettingRows1 = [yData[i] for i in quadrant1] targettingRows2 = [yData[i] for i in quadrant2] targettingRows3 = [yData[i] for i in quadrant3] targettingRows4 = [yData[i] for i in quadrant4] targettingRows5 = [yData[i] for i in quadrant5] targettingRows1Arr = np.array(targettingRows1) targettingRows2Arr = np.array(targettingRows2) targettingRows3Arr = np.array(targettingRows3) targettingRows4Arr = np.array(targettingRows4) targettingRows5Arr = np.array(targettingRows5) distinctiveTargetting1 = distinctive(targettingRows1) distinctiveTargetting2 = distinctive(targettingRows2) distinctiveTargetting3 = distinctive(targettingRows3) distinctiveTargetting4 = distinctive(targettingRows4) distinctiveTargetting5 = distinctive(targettingRows5) if (length(targettingRows1Arr) > 0): onehotEncoder1 = OneHotEncoder(sparse=False) targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1) onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr) hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1) concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1) corrMatrixComb1 = concatingDF1.corr() corrMatrixComb1 = corrMatrixComb1.abs() corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):] DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan) DataRows1 = DataRows1.fillnone(0) X1 = add_constant(DataRows1) X1 = X1.replacing([np.inf, -np.inf], np.nan) X1 = X1.fillnone(0) VIF1 = mk.Collections([variance_inflation_factor(X1.values, i) for i in range(X1.shape[1])], index=X1.columns) VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan) VIF1 = VIF1.fillnone(0) if (length(targettingRows1Arr) > 2): MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED) MI1List = MI1.convert_list() else: MI1List = [] else: corrMatrixComb1 = mk.KnowledgeFrame() VIF1 = mk.Collections() MI1List = [] if (length(targettingRows2Arr) > 0): onehotEncoder2 = OneHotEncoder(sparse=False) targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1) onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr) hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2) concatingDF2 =
mk.concating([DataRows2, hotEncoderDF2], axis=1)
pandas.concat
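The record above completes to pandas.concat: a feature frame is concatenated column-wise with a one-hot encoding of the class labels so that per-feature correlations against each class column can be read off. Below is a minimal sketch of that pattern in plain pandas (the dump's mk/monkey aliases correspond to pd/pandas; the toy data and the use of pd.get_dummies in place of sklearn's OneHotEncoder are assumptions made for brevity):

import pandas as pd

# toy feature frame and class labels (hypothetical data)
X = pd.DataFrame({"f0": [0.1, 0.4, 0.3, 0.9], "f1": [1.0, 0.2, 0.5, 0.7]})
y = pd.Series([0, 1, 1, 0], name="label")

hot_df = pd.get_dummies(y).astype(float)                 # one column per class label
combined = pd.concat([X, hot_df], axis=1)                # column-wise concatenation
corr = combined.corr().abs().iloc[:, -hot_df.shape[1]:]  # feature-vs-class correlations
print(corr.loc[["f0"]])                                  # row for a single feature, as in the snippet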
# %% [markdown] # This python script takes audio files from "filedata" from sonicboom, runs each audio file through # Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test & validation # and paste them in their respective folders # Import Dependencies import numpy as np import monkey as mk import scipy from scipy import io from scipy.io.wavfile import read as wavread from scipy.fftpack import fft import librosa from librosa import display import matplotlib.pyplot as plt from glob import glob import sklearn from sklearn.model_selection import train_test_split import os from PIL import Image import pathlib import sonicboom from joblib import Partotal_allel, delayed # %% [markdown] # ## Read and add filepaths to original UrbanSound metadata filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom #Initialize empty knowledgeframes to later enable saving the images into their respective folders train =
mk.KnowledgeFrame()
pandas.DataFrame
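Here the completion is simply an empty pandas.DataFrame, used by the audio script to initialize the train/test/validation holders before splitting the metadata. A small hedged illustration of that setup (file paths and labels below are invented):

import pandas as pd
from sklearn.model_selection import train_test_split

# hypothetical stand-in for the UrbanSound metadata frame
filedata = pd.DataFrame({"path": [f"fold1/clip{i}.wav" for i in range(10)],
                         "label": ["siren", "drill"] * 5})

train = pd.DataFrame()        # empty frames, as in the snippet, filled later
test = pd.DataFrame()
validation = pd.DataFrame()

train, rest = train_test_split(filedata, test_size=0.4, random_state=0)
test, validation = train_test_split(rest, test_size=0.5, random_state=0)
print(len(train), len(test), len(validation))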
''' The analysis module Handles the analyses of the info and data space for experiment evaluation and design. ''' from slm_lab.agent import AGENT_DATA_NAMES from slm_lab.env import ENV_DATA_NAMES from slm_lab.lib import logger, util, viz import numpy as np import os import monkey as mk import pydash as ps import shutil DATA_AGG_FNS = { 't': 'total_sum', 'reward': 'total_sum', 'loss': 'average', 'explore_var': 'average', } FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency'] # TODO improve to make it work with whatever reward average FITNESS_STD = util.read('slm_lab/spec/_fitness_standard.json') NOISE_WINDOW = 0.05 MA_WINDOW = 100 logger = logger.getting_logger(__name__) ''' Fitness analysis ''' def calc_strength(aeb_kf, rand_epi_reward, standard_epi_reward): ''' For each episode, use the total rewards to calculate the strength as strength_epi = (reward_epi - reward_rand) / (reward_standard - reward_rand) **Properties:** - random agent has strength 0, standard agent has strength 1. - if an agent achieve x2 rewards, the strength is ~x2, and so on. - strength of learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards) - scale of strength is always standard at 1 and its multiplies, regardless of the scale of actual rewards. Strength stays invariant even as reward gettings rescaled. This total_allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties. ''' # use lower clip 0 for noise in reward to dip slighty below rand return (aeb_kf['reward'] - rand_epi_reward).clip(0.) / (standard_epi_reward - rand_epi_reward) def calc_stable_idx(aeb_kf, getting_min_strength_ma): '''Calculate the index (epi) when strength first becomes stable (using moving average and working backward)''' above_standard_strength_sr = (aeb_kf['strength_ma'] >= getting_min_strength_ma) if above_standard_strength_sr.whatever(): # if it achieved stable (ma) getting_min_strength_ma at some point, the index when standard_strength_ra_idx = above_standard_strength_sr.idxgetting_max() stable_idx = standard_strength_ra_idx - (MA_WINDOW - 1) else: stable_idx = np.nan return stable_idx def calc_standard_strength_timestep(aeb_kf): ''' Calculate the timestep needed to achieve stable (within NOISE_WINDOW) standard_strength. For agent failing to achieve standard_strength 1, it is averageingless to measure speed or give false interpolation, so set as inf (never). ''' standard_strength = 1. stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=standard_strength - NOISE_WINDOW) if np.ifnan(stable_idx): standard_strength_timestep = np.inf else: standard_strength_timestep = aeb_kf.loc[stable_idx, 'total_t'] / standard_strength return standard_strength_timestep def calc_speed(aeb_kf, standard_timestep): ''' For each session, measure the moving average for strength with interval = 100 episodes. Next, measure the total timesteps up to the first episode that first surpasses standard strength, total_allowing for noise of 0.05. Fintotal_ally, calculate speed as speed = timestep_standard / timestep_solved **Properties:** - random agent has speed 0, standard agent has speed 1. - if an agent takes x2 timesteps to exceed standard strength, we can say it is 2x slower. 
- the speed of learning agent always tends toward positive regardless of the shape of the rewards curve - the scale of speed is always standard at 1 and its multiplies, regardless of the absolute timesteps. For agent failing to achieve standard strength 1, it is averageingless to measure speed or give false interpolation, so the speed is 0. This total_allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem. ''' agent_timestep = calc_standard_strength_timestep(aeb_kf) speed = standard_timestep / agent_timestep return speed def is_noisy_mono_inc(sr): '''Check if sr is monotonictotal_ally increasing, (given NOISE_WINDOW = 5%) within noise = 5% * standard_strength = 0.05 * 1''' zero_noise = -NOISE_WINDOW mono_inc_sr = np.diff(sr) >= zero_noise # restore sr to same lengthgth mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan) return mono_inc_sr def calc_stability(aeb_kf): ''' Find a baseline = - 0. + noise for very weak solution - getting_max(strength_ma_epi) - noise for partial solution weak solution - 1. - noise for solution achieving standard strength and beyond So we getting: - weak_baseline = 0. + noise - strong_baseline = getting_min(getting_max(strength_ma_epi), 1.) - noise - baseline = getting_max(weak_baseline, strong_baseline) Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonictotal_ally increasing. Calculate stability as stability = #epi_>= / #epi_+ **Properties:** - stable agent has value 1, unstable agent < 1, and non-solution = 0. - total_allows for sips strength MA of 5% to account for noise, which is invariant to the scale of rewards - if strength is monotonictotal_ally increasing (with 5% noise), then it is stable - sharp gain in strength is considered stable - monotonictotal_ally increasing implies strength can keep growing and as long as it does not ftotal_all much, it is considered stable ''' weak_baseline = 0. + NOISE_WINDOW strong_baseline = getting_min(aeb_kf['strength_ma'].getting_max(), 1.) - NOISE_WINDOW baseline = getting_max(weak_baseline, strong_baseline) stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=baseline) if np.ifnan(stable_idx): stability = 0. else: stable_kf = aeb_kf.loc[stable_idx:, 'strength_mono_inc'] stability = stable_kf.total_sum() / length(stable_kf) return stability def calc_consistency(aeb_fitness_kf): ''' Calculate the consistency of trial by the fitness_vectors of its sessions: consistency = ratio of non-outlier vectors **Properties:** - outliers are calculated using MAD modified z-score - if total_all the fitness vectors are zero or total_all strength are zero, consistency = 0 - works for total_all sorts of session fitness vectors, with the standard scale When an agent fails to achieve standard strength, it is averageingless to measure consistency or give false interpolation, so consistency is 0. ''' fitness_vecs = aeb_fitness_kf.values if ~np.whatever(fitness_vecs) or ~np.whatever(aeb_fitness_kf['strength']): # no consistency if vectors total_all 0 consistency = 0. 
elif length(fitness_vecs) == 2: # if only has 2 vectors, check norm_diff diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(length(fitness_vecs[0]))) consistency = diff_norm <= NOISE_WINDOW else: is_outlier_arr = util.is_outlier(fitness_vecs) consistency = (~is_outlier_arr).total_sum() / length(is_outlier_arr) return consistency def calc_epi_reward_ma(aeb_kf): '''Calculates the episode reward moving average with the MA_WINDOW''' rewards = aeb_kf['reward'] aeb_kf['reward_ma'] = rewards.rolling(window=MA_WINDOW, getting_min_periods=0, center=False).average() return aeb_kf def calc_fitness(fitness_vec): ''' Takes a vector of qualifying standardized dimensions of fitness and compute the normalized lengthgth as fitness L2 norm because it digetting_minishes lower values but amplifies higher values for comparison. ''' if incontainstance(fitness_vec, mk.Collections): fitness_vec = fitness_vec.values elif incontainstance(fitness_vec, mk.KnowledgeFrame): fitness_vec = fitness_vec.iloc[0].values standard_fitness_vector = np.ones(length(fitness_vec)) fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(standard_fitness_vector) return fitness def calc_aeb_fitness_sr(aeb_kf, env_name): '''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)''' no_fitness_sr = mk.Collections({ 'strength': 0., 'speed': 0., 'stability': 0.}) if length(aeb_kf) < MA_WINDOW: logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness') return no_fitness_sr standard = FITNESS_STD.getting(env_name) if standard is None: standard = FITNESS_STD.getting('template') logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.') aeb_kf['total_t'] = aeb_kf['t'].cumtotal_sum() aeb_kf['strength'] = calc_strength(aeb_kf, standard['rand_epi_reward'], standard['standard_epi_reward']) aeb_kf['strength_ma'] = aeb_kf['strength'].rolling(MA_WINDOW).average() aeb_kf['strength_mono_inc'] = is_noisy_mono_inc(aeb_kf['strength']).totype(int) strength = aeb_kf['strength_ma'].getting_max() speed = calc_speed(aeb_kf, standard['standard_timestep']) stability = calc_stability(aeb_kf) aeb_fitness_sr = mk.Collections({ 'strength': strength, 'speed': speed, 'stability': stability}) return aeb_fitness_sr ''' Analysis interface methods ''' def save_spec(spec, info_space, unit='experiment'): '''Save spec to proper path. Ctotal_alled at Experiment or Trial init.''' prepath = util.getting_prepath(spec, info_space, unit) util.write(spec, f'{prepath}_spec.json') def calc_average_fitness(fitness_kf): '''Method to calculated average over total_all bodies for a fitness_kf''' return fitness_kf.average(axis=1, level=3) def getting_session_data(session): ''' Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate. 
@returns {dict, dict} session_mdp_data, session_data ''' session_data = {} for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data): session_data[aeb] = body.kf.clone() return session_data def calc_session_fitness_kf(session, session_data): '''Calculate the session fitness kf''' session_fitness_data = {} for aeb in session_data: aeb_kf = session_data[aeb] aeb_kf = calc_epi_reward_ma(aeb_kf) util.downcast_float32(aeb_kf) body = session.aeb_space.body_space.data[aeb] aeb_fitness_sr = calc_aeb_fitness_sr(aeb_kf, body.env.name) aeb_fitness_kf = mk.KnowledgeFrame([aeb_fitness_sr], index=[session.index]) aeb_fitness_kf = aeb_fitness_kf.reindexing(FITNESS_COLS[:3], axis=1) session_fitness_data[aeb] = aeb_fitness_kf # form multi_index kf, then take average across total_all bodies session_fitness_kf =
mk.concating(session_fitness_data, axis=1)
pandas.concat
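This record again resolves to pandas.concat, now applied to a dict of per-body fitness frames so that the dict keys become an extra column level that can later be averaged over (the module's calc_average_fitness does this with mean(axis=1, level=3) over (agent, env, body) tuple keys). A reduced sketch with plain string keys, so the metric level is 1 instead of 3; values are invented:

import pandas as pd

fitness_a = pd.DataFrame([{"strength": 0.9, "speed": 0.8, "stability": 1.0}], index=[0])
fitness_b = pd.DataFrame([{"strength": 0.7, "speed": 0.6, "stability": 0.9}], index=[0])

session_fitness = pd.concat({"body_a": fitness_a, "body_b": fitness_b}, axis=1)
print(session_fitness.columns)                   # MultiIndex of (body, metric)

# average across bodies, per metric (transpose + groupby stays portable across pandas versions)
mean_fitness = session_fitness.T.groupby(level=1).mean().T
print(mean_fitness)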
#!/usr/bin/env python3 # Project : From geodynamic to Seismic observations in the Earth's inner core # Author : <NAME> """ Implement classes for tracers, to create points along the trajectories of given points. """ import numpy as np import monkey as mk import math import matplotlib.pyplot as plt from . import data from . import geodyn_analytical_flows from . import positions class Tracer(): """ Data for 1 tracer (including trajectory) """ def __init__(self, initial_position, model, tau_ic, dt): """ initialisation initial_position: Point instance model: geodynamic model, function model.trajectory_single_point is required """ self.initial_position = initial_position self.model = model # geodynamic model try: self.model.trajectory_single_point except NameError: print( "model.trajectory_single_point is required, please check the input model: {}".formating(model)) point = [initial_position.x, initial_position.y, initial_position.z] self.crysttotal_allization_time = self.model.crysttotal_allisation_time(point, tau_ic) num_t = getting_max(2, math.floor((tau_ic - self.crysttotal_allization_time) / dt)) # print(tau_ic, self.crysttotal_allization_time, num_t) self.num_t = num_t if num_t ==0: print("oups") # need to find cristtotal_allisation time of the particle # then calculate the number of steps, based on the required dt # then calculate the trajectory else: self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point( self.initial_position, tau_ic, self.crysttotal_allization_time, num_t) self.time = np.linspace(tau_ic, self.crysttotal_allization_time, num_t) self.position = np.zeros((num_t, 3)) self.velocity = np.zeros((num_t, 3)) self.velocity_gradient = np.zeros((num_t, 9)) def spherical(self): for index, (time, x, y, z) in enumerate( zip(self.time, self.traj_x, self.traj_y, self.traj_z)): point = positions.CartesianPoint(x, y, z) r, theta, phi = point.r, point.theta, point.phi grad = self.model.gradient_spherical(r, theta, phi, time) self.position[index, :] = [r, theta, phi] self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)] self.velocity_gradient[index, :] = grad.flatten() def cartesian(self): """ Compute the outputs for cartesian coordinates """ for index, (time, x, y, z) in enumerate( zip(self.time, self.traj_x, self.traj_y, self.traj_z)): point = positions.CartesianPoint(x, y, z) r, theta, phi = point.r, point.theta, point.phi x, y, z = point.x, point.y, point.z vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time) grad = self.model.gradient_cartesian(r, theta, phi, time) self.position[index, :] = [x, y, z] self.velocity[index, :] = vel[:] self.velocity_gradient[index, :] = grad.flatten() def output_spher(self, i): list_i = i * np.ones_like(self.time) data_i = mk.KnowledgeFrame(data=list_i, columns=["i"]) data_time = mk.KnowledgeFrame(data=self.time, columns=["time"]) dt = np.adding(np.abs(np.diff(self.time)), [0]) data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"]) data_pos = mk.KnowledgeFrame(data=self.position, columns=["r", "theta", "phi"]) data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"]) data_strain = mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvr/dtheta", "dvtheta/dtheta", "dvtheta/dphi","dvphi/dr", "dvphi/dtheta", "dvphi/dphi"]) data = mk.concating([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1) return data #data.to_csv("tracer.csv", 
sep=" ", index=False) def output_cart(self, i): list_i = i * np.ones_like(self.time) data_i = mk.KnowledgeFrame(data=list_i, columns=["i"]) data_time = mk.KnowledgeFrame(data=self.time, columns=["time"]) dt = np.adding([0], np.diff(self.time)) data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"]) data_pos = mk.KnowledgeFrame(data=self.position, columns=["x", "y", "z"]) data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"]) data_strain =
mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"])
pandas.DataFrame
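The completion here is a pandas.DataFrame built from a NumPy array with explicit column names, one of several frames the tracer's output methods assemble column-wise. A compact, self-contained version of that pattern (array contents are random placeholders):

import numpy as np
import pandas as pd

num_t = 5
time = np.linspace(1.0, 0.0, num_t)
position = np.random.rand(num_t, 3)
velocity = np.random.rand(num_t, 3)

data_time = pd.DataFrame(data=time, columns=["time"])
data_pos = pd.DataFrame(data=position, columns=["x", "y", "z"])
data_velo = pd.DataFrame(data=velocity, columns=["v_x", "v_y", "v_z"])

# column-wise assembly of one tracer's output, as in output_cart()
tracer_out = pd.concat([data_time, data_pos, data_velo], axis=1)
print(tracer_out.head())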
#!/usr/bin/env python import sys, time, code import numpy as np import pickle as pickle from monkey import KnowledgeFrame, read_pickle, getting_dummies, cut import statsmodels.formula.api as sm from sklearn.externals import joblib from sklearn.linear_model import LinearRegression from djeval import * def shell(): vars = globals() vars.umkate(locals()) shell = code.InteractiveConsole(vars) shell.interact() def fix_colname(cn): return cn.translate(None, ' ()[],') msg("Hi, reading yy_kf.") yy_kf = read_pickle(sys.argv[1]) # clean up column names colnames = list(yy_kf.columns.values) colnames = [fix_colname(cn) for cn in colnames] yy_kf.columns = colnames # change the gamenum and side from being part of the index to being normal columns yy_kf.reseting_index(inplace=True) msg("Getting subset ready.") # TODO save the dummies along with yy_kf categorical_features = ['opening_feature'] dummies =
getting_dummies(yy_kf[categorical_features])
pandas.get_dummies
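The target API for this record is pandas.get_dummies, applied to the categorical opening_feature column before fitting the regression. A minimal sketch with invented values:

import pandas as pd

# hypothetical stand-in for yy_df after unpickling
yy_df = pd.DataFrame({"opening_feature": ["sicilian", "french", "sicilian", "caro_kann"],
                      "elo": [1500, 1720, 1680, 1590]})

categorical_features = ["opening_feature"]
dummies = pd.get_dummies(yy_df[categorical_features])    # one indicator column per category

# join the indicators back onto the numeric features for the regression design matrix
design = pd.concat([yy_df.drop(columns=categorical_features), dummies], axis=1)
print(design.columns.tolist())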
import os import numpy as np import monkey as mk from numpy import abs from numpy import log from numpy import sign from scipy.stats import rankdata import scipy as sp import statsmodels.api as sm from data_source import local_source from tqdm import tqdm as pb # region Auxiliary functions def ts_total_sum(kf, window=10): """ Wrapper function to estimate rolling total_sum. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections total_sum over the past 'window' days. """ return kf.rolling(window).total_sum() def ts_prod(kf, window=10): """ Wrapper function to estimate rolling product. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days. """ return kf.rolling(window).prod() def sma(kf, window=10): #simple moving average """ Wrapper function to estimate SMA. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections SMA over the past 'window' days. """ return kf.rolling(window).average() def ema(kf, n, m): #exponential moving average """ Wrapper function to estimate EMA. :param kf: a monkey KnowledgeFrame. :return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1} """ result = kf.clone() for i in range(1,length(kf)): result.iloc[i]= (m*kf.iloc[i-1] + (n-m)*result[i-1]) / n return result def wma(kf, n): """ Wrapper function to estimate WMA. :param kf: a monkey KnowledgeFrame. :return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1} """ weights = mk.Collections(0.9*np.flipud(np.arange(1,n+1))) result = mk.Collections(np.nan, index=kf.index) for i in range(n-1,length(kf)): result.iloc[i]= total_sum(kf[i-n+1:i+1].reseting_index(sip=True)*weights.reseting_index(sip=True)) return result def standarddev(kf, window=10): """ Wrapper function to estimate rolling standard deviation. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days. """ return kf.rolling(window).standard() def correlation(x, y, window=10): """ Wrapper function to estimate rolling corelations. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days. """ return x.rolling(window).corr(y) def covariance(x, y, window=10): """ Wrapper function to estimate rolling covariance. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days. """ return x.rolling(window).cov(y) def rolling_rank(na): """ Auxiliary function to be used in mk.rolling_employ :param na: numpy array. :return: The rank of the final_item value in the array. """ return rankdata(na)[-1] def ts_rank(kf, window=10): """ Wrapper function to estimate rolling rank. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections rank over the past window days. """ return kf.rolling(window).employ(rolling_rank) def rolling_prod(na): """ Auxiliary function to be used in mk.rolling_employ :param na: numpy array. :return: The product of the values in the array. """ return np.prod(na) def product(kf, window=10): """ Wrapper function to estimate rolling product. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. 
:return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days. """ return kf.rolling(window).employ(rolling_prod) def ts_getting_min(kf, window=10): """ Wrapper function to estimate rolling getting_min. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days. """ return kf.rolling(window).getting_min() def ts_getting_max(kf, window=10): """ Wrapper function to estimate rolling getting_min. :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: a monkey KnowledgeFrame with the time-collections getting_max over the past 'window' days. """ return kf.rolling(window).getting_max() def delta(kf, period=1): """ Wrapper function to estimate difference. :param kf: a monkey KnowledgeFrame. :param period: the difference grade. :return: a monkey KnowledgeFrame with todayโ€™s value getting_minus the value 'period' days ago. """ return kf.diff(period) def delay(kf, period=1): """ Wrapper function to estimate lag. :param kf: a monkey KnowledgeFrame. :param period: the lag grade. :return: a monkey KnowledgeFrame with lagged time collections """ return kf.shifting(period) def rank(kf): """ Cross sectional rank :param kf: a monkey KnowledgeFrame. :return: a monkey KnowledgeFrame with rank along columns. """ #return kf.rank(axis=1, pct=True) return kf.rank(pct=True) def scale(kf, k=1): """ Scaling time serie. :param kf: a monkey KnowledgeFrame. :param k: scaling factor. :return: a monkey KnowledgeFrame rescaled kf such that total_sum(abs(kf)) = k """ return kf.mul(k).division(np.abs(kf).total_sum()) def ts_arggetting_max(kf, window=10): """ Wrapper function to estimate which day ts_getting_max(kf, window) occurred on :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: well.. that :) """ return kf.rolling(window).employ(np.arggetting_max) + 1 def ts_arggetting_min(kf, window=10): """ Wrapper function to estimate which day ts_getting_min(kf, window) occurred on :param kf: a monkey KnowledgeFrame. :param window: the rolling window. :return: well.. that :) """ return kf.rolling(window).employ(np.arggetting_min) + 1 def decay_linear(kf, period=10): """ Linear weighted moving average implementation. :param kf: a monkey KnowledgeFrame. :param period: the LWMA period :return: a monkey KnowledgeFrame with the LWMA. """ try: kf = kf.to_frame() #Collections is not supported for the calculations below. except: pass # Clean data if kf.ifnull().values.whatever(): kf.fillnone(method='ffill', inplace=True) kf.fillnone(method='bfill', inplace=True) kf.fillnone(value=0, inplace=True) na_lwma = np.zeros_like(kf) na_lwma[:period, :] = kf.iloc[:period, :] na_collections = kf.values divisionisor = period * (period + 1) / 2 y = (np.arange(period) + 1) * 1.0 / divisionisor # Estimate the actual lwma with the actual close. # The backtest engine should assure to be snooping bias free. 
for row in range(period - 1, kf.shape[0]): x = na_collections[row - period + 1: row + 1, :] na_lwma[row, :] = (np.dot(x.T, y)) return mk.KnowledgeFrame(na_lwma, index=kf.index, columns=['CLOSE']) def highday(kf, n): #่ฎก็ฎ—kfๅ‰nๆœŸๆ—ถ้—ดๅบๅˆ—ไธญๆœ€ๅคงๅ€ผ่ท็ฆปๅฝ“ๅ‰ๆ—ถ็‚น็š„้—ด้š” result = mk.Collections(np.nan, index=kf.index) for i in range(n,length(kf)): result.iloc[i]= i - kf[i-n:i].idxgetting_max() return result def lowday(kf, n): #่ฎก็ฎ—kfๅ‰nๆœŸๆ—ถ้—ดๅบๅˆ—ไธญๆœ€ๅฐๅ€ผ่ท็ฆปๅฝ“ๅ‰ๆ—ถ็‚น็š„้—ด้š” result = mk.Collections(np.nan, index=kf.index) for i in range(n,length(kf)): result.iloc[i]= i - kf[i-n:i].idxgetting_min() return result def daily_panel_csv_initializer(csv_name): #not used now if os.path.exists(csv_name)==False: stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY') date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"') dataset=0 for date in date_list["TRADE_DATE"]: stock_list[date]=stock_list["INDUSTRY"] stock_list.sip("INDUSTRY",axis=1,inplace=True) stock_list.set_index("TS_CODE", inplace=True) dataset = mk.KnowledgeFrame(stock_list.stack()) dataset.reseting_index(inplace=True) dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"] dataset.to_csv(csv_name,encoding='utf-8-sig',index=False) else: dataset=mk.read_csv(csv_name) return dataset def IndustryAverage_vwap(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_vwap.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average vwap data needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average vwap data needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average vwap data is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass VWAP = 
(quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) result_unaveraged_piece = VWAP result_unaveraged_piece.renagetting_ming("VWAP_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["VWAP_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig') return result_industryaveraged_kf def IndustryAverage_close(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_close.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average close data needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average close data needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average close data is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass CLOSE = 
quotations_daily_chosen['CLOSE'] result_unaveraged_piece = CLOSE result_unaveraged_piece.renagetting_ming("CLOSE_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["CLOSE_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig') return result_industryaveraged_kf def IndustryAverage_low(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_low.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average low data needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average low data needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average low data is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass LOW = quotations_daily_chosen['LOW'] result_unaveraged_piece = LOW 
result_unaveraged_piece.renagetting_ming("LOW_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["LOW_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig') return result_industryaveraged_kf def IndustryAverage_volume(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_volume.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average volume data needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average volume data needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average volume data is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass VOLUME = quotations_daily_chosen['VOL']*100 result_unaveraged_piece = VOLUME 
result_unaveraged_piece.renagetting_ming("VOLUME_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["VOLUME_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig') return result_industryaveraged_kf def IndustryAverage_adv(num): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num)) result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average adv{num} data needs not to be umkated.".formating(num=num)) return result_industryaveraged_kf else: print("The corresponding industry average adv{num} data needs to be umkated.".formating(num=num)) first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average adv{num} data is missing.".formating(num=num)) result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass VOLUME = 
quotations_daily_chosen['VOL']*100 result_unaveraged_piece = sma(VOLUME, num) result_unaveraged_piece.renagetting_ming("ADV{num}_UNAVERAGED".formating(num=num),inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["ADV{num}_UNAVERAGED".formating(num=num)].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num),encoding='utf-8-sig') return result_industryaveraged_kf #(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close def IndustryAverage_PreparationForAlpha048(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average data for alpha048 needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average data for alpha048 needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average dataset for alpha048 is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = 
date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass CLOSE = quotations_daily_chosen['CLOSE'] result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig') return result_industryaveraged_kf #(vwap * 0.728317) + (vwap *(1 - 0.728317)) def IndustryAverage_PreparationForAlpha059(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average data for alpha059 needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average data for alpha059 needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average dataset for alpha059 is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda 
x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317)) result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig') return result_industryaveraged_kf #(close * 0.60733) + (open * (1 - 0.60733)) def IndustryAverage_PreparationForAlpha079(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average data for alpha079 needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average data for alpha079 needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average dataset for alpha079 is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = 
'+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass OPEN = quotations_daily_chosen['OPEN'] CLOSE = quotations_daily_chosen['CLOSE'] result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733)) result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig') return result_industryaveraged_kf #((open * 0.868128) + (high * (1 - 0.868128)) def IndustryAverage_PreparationForAlpha080(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed = mk.Collections(result_industryaveraged_kf.index) date_list_umkate = date_list[~date_list.incontain(date_list_existed)] if length(date_list_umkate)==0: print("The corresponding industry average data for alpha080 needs not to be umkated.") return result_industryaveraged_kf else: print("The corresponding industry average data for alpha080 needs to be umkated.") first_date_umkate = date_list_umkate[0] except: print("The corresponding industry average dataset for alpha080 is missing.") result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list) date_list_umkate = date_list first_date_umkate=0 #building/umkating dataset result_unaveraged_industry=0 for industry in pb(industry_list, desc='Please wait', colour='#ffffff'): stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry] #calculating unindentralized data for ts_code 
in stock_list_industry.index: quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int) quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x) try: #valid only in umkating index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0] first_date_needed = date_list_existed.loc[index_first_date_needed] quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed] except: pass OPEN = quotations_daily_chosen['OPEN'] HIGH = quotations_daily_chosen['HIGH'] result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128)) result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True) result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece) result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry) result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"]) result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code) result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed if type(result_unaveraged_industry)==int: result_unaveraged_industry=result_unaveraged_piece else: result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0) #indentralizing data for date in date_list_umkate: try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date] value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].average() result_industryaveraged_kf.loc[date,industry]=value except: pass result_unaveraged_industry=0 result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig') return result_industryaveraged_kf #((low * 0.721001) + (vwap * (1 - 0.721001)) def IndustryAverage_PreparationForAlpha097(): stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE") industry_list=stock_list["INDUSTRY"].sip_duplicates() date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int) #check for building/umkating/reading dataset try: result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv") result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int) result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True) date_list_existed =
mk.Collections(result_industryaveraged_kf.index)
pandas.Series
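The row above ends exactly where the masked call belongs: wrapping the cached table's TRADE_DATE index in a Series so it can be matched against the full date list. A minimal sketch of that step in plain pandas — the mapping mk→pd and Collections→Series is only inferred from the row's api field (pandas.Series), and the two-date cache below is a made-up placeholder, not data from the source:

import pandas as pd

# Hypothetical stand-in for the cached industry-average table keyed by TRADE_DATE.
cached = pd.DataFrame({"Banking": [1.0, 1.1]},
                      index=pd.Index([20210104, 20210105], name="TRADE_DATE"))
date_list = pd.Series([20210104, 20210105, 20210106])

date_list_existed = pd.Series(cached.index)              # the masked call: pandas.Series over an index
date_list_update = date_list[~date_list.isin(date_list_existed)]
print(date_list_update.tolist())                         # [20210106] -> only the missing dates get rebuilt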
from turtle import TPen, color import numpy as np import monkey as mk import random import matplotlib.pyplot as plt import seaborn as sns import sklearn.metrics as metrics from keras.models import Sequential from keras.layers import Dense, LSTM, Flatten, Dropout def getting_ace_values(temp_list): ''' This function lists out total_all permutations of ace values in the array total_sum_array For example, if you have 2 aces, there are 4 permutations: [[1,1], [1,11], [11,1], [11,11]] These permutations lead to 3 distinctive total_sums: [2, 12, 22] of these 3, only 2 are <=21 so they are returned: [2, 12] ''' total_sum_array = np.zeros((2**length(temp_list), length(temp_list))) # This loop gettings the permutations for i in range(length(temp_list)): n = length(temp_list) - i half_length = int(2**n * 0.5) for rep in range(int(total_sum_array.shape[0]/half_length/2)): #โญ๏ธ shape[0] ่ฟ”ๅ›ž numpy ๆ•ฐ็ป„็š„่กŒๆ•ฐ total_sum_array[rep*2**n : rep*2**n+half_length, i] = 1 total_sum_array[rep*2**n+half_length : rep*2**n+half_length*2, i] = 11 # Only return values that are valid (<=21) # return list(set([int(s) for s in np.total_sum(total_sum_array, axis=1) if s<=21])) #โญ๏ธ ๅฐ†ๆ‰€ๆœ‰ 'A' ่ƒฝ็ป„ๆˆๆ€ปๅ’Œไธ่ถ…่ฟ‡ 21 ็š„ๅ€ผ่ฟ”ๅ›ž return [int(s) for s in np.total_sum(total_sum_array, axis=1)] #โญ๏ธ ๅฐ†ๆ‰€ๆœ‰ 'A' ่ƒฝ็ป„ๆˆ็š„็‚นๆ•ฐไปฅ int ็ฑปๅž‹่ฟ”ๅ›ž๏ผˆๆœ‰้‡ๅคๅ’Œ่ถ…่ฟ‡ 21 ็‚น็š„ๅ€ผ๏ผ‰ def ace_values(num_aces): ''' Convert num_aces, an int to a list of lists For example, if num_aces=2, the output should be [[1,11],[1,11]] I require this formating for the getting_ace_values function ''' temp_list = [] for i in range(num_aces): temp_list.adding([1,11]) return getting_ace_values(temp_list) def func(x): ''' ๅˆคๆ–ญ็Žฉๅฎถ่ตทๆ‰‹ๆ˜ฏๅฆไธบ 21 ็‚น ''' if x == 21: return 1 else: return 0 def make_decks(num_decks, card_types): ''' Make a deck -- ๆ นๆฎ็ป™ๅฎšๅ‰ฏๆ•ฐๆด—ๅฅฝ็‰Œ input: num_decks -> ็‰Œๅ‰ฏๆ•ฐ card_types -> ๅ•ๅ‰ฏ็‰Œๅ•ไธช่Šฑ่‰ฒๅฏนๅบ”็š„็‰Œๅ€ผ output: new_deck -> ไธ€ๅ‰ฏ็‰Œๅฏนๅบ”็‰Œๅ€ผ ''' new_deck = [] for i in range(num_decks): for j in range(4): # ไปฃ่กจ้ป‘็บขๆข…ๆ–น new_deck.extend(card_types) #โญ๏ธ extend() ๅ‡ฝๆ•ฐ็”จไบŽๅœจๅˆ—่กจๆœซๅฐพไธ€ๆฌกๆ€ง่ฟฝๅŠ ๅฆไธ€ไธชๅบๅˆ—ไธญ็š„ๅคšไธชๅ€ผ random.shuffle(new_deck) return new_deck def total_up(hand): ''' Total up value of hand input: <list> hand -> ๅฝ“ๅ‰ๆ‰‹็‰Œ็ป„ๅˆ output: <int> -> ่ฎก็ฎ—ๅฝ“ๅ‰ๆ‰‹็‰Œ็š„ๅˆๆณ•ๅ€ผ ''' aces = 0 # ่ฎฐๅฝ• โ€˜Aโ€™ ็š„ๆ•ฐ็›ฎ total = 0 # ่ฎฐๅฝ•้™ค โ€˜Aโ€™ ไปฅๅค–ๆ•ฐๅญ—ไน‹ๅ’Œ for card in hand: if card != 'A': total += card else: aces += 1 # Ctotal_all function ace_values to produce list of possible values for aces in hand ace_value_list = ace_values(aces) final_totals = [i+total for i in ace_value_list if i+total<=21] # โ€˜Aโ€™ ๅฏไปฅๆ˜ฏ 1 ไนŸๅฏไปฅๆ˜ฏ 11๏ผŒๅฝ“ๅ‰็‰Œๅ€ผไธ่ถ…่ฟ‡ 21 ๆ—ถ๏ผŒๅ–ๆœ€ๅคงๅ€ผ -- ่ง„ๅˆ™โ—๏ธ if final_totals == []: return getting_min(ace_value_list) + total else: return getting_max(final_totals) def model_decision_old(model, player_total_sum, has_ace, dealer_card_num, hit=0, card_count=None): ''' Given the relevant inputs, the function below uses the neural net to make a prediction and then based on that prediction, decides whether to hit or stay โ€”โ€” ๅฐ†็Žฉๅฎถๅ„ๅ‚ๆ•ฐไผ ๅ…ฅ็ฅž็ป็ฝ‘็ปœๆจกๅž‹๏ผŒๅฆ‚ๆžœ้ข„ๆต‹็ป“ๆžœๅคงไบŽ 0.52, ๅˆ™ hit, ๅฆๅˆ™ stand input: model -> ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ NN ๆจกๅž‹๏ผ‰ player_total_sum -> ็Žฉๅฎถๅฝ“ๅ‰ๆ‰‹็‰Œๅ’Œ has_ace -> ็Žฉๅฎถๅ‘็‰Œๆ˜ฏๅฆๆœ‰ 'A' dealer_card_num -> ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰ๅ€ผ hit -> ็Žฉๅฎถๆ˜ฏๅฆโ€˜่ฆ็‰Œโ€™ card_count -> ่ฎฐ็‰Œๅ™จ return: 1 -> hit 0 -> stand ''' # 
ๅฐ†้œ€่ฆ่ฟ›ๅ…ฅ็ฅž็ป็ฝ‘็ปœๆจกๅž‹็š„ๆ•ฐๆฎ็ปŸไธ€ๆ ผๅผ # [[18 0 0 6]] input_array = np.array([player_total_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # ไบŒ็ปดๆ•ฐ็ป„ๅ˜ๆˆไธ€่กŒ (1, n) cc_array = mk.KnowledgeFrame.from_dict([card_count]) input_array = np.concatingenate([input_array, cc_array], axis=1) # input_array ไฝœไธบ่พ“ๅ…ฅไผ ๅ…ฅ็ฅž็ป็ฝ‘็ปœ๏ผŒไฝฟ็”จ้ข„ๆต‹ๅ‡ฝๆ•ฐๅŽๅญ˜ๅ…ฅ predict_correct # [[0.10379896]] predict_correct = model.predict(input_array) if predict_correct >= 0.52: return 1 else: return 0 def model_decision(model, card_count, dealer_card_num): ''' Given the relevant inputs, the function below uses the neural net to make a prediction and then based on that prediction, decides whether to hit or stay โ€”โ€” ๅฐ†็Žฉๅฎถๅ„ๅ‚ๆ•ฐไผ ๅ…ฅ็ฅž็ป็ฝ‘็ปœๆจกๅž‹๏ผŒๅฆ‚ๆžœ้ข„ๆต‹็ป“ๆžœๅคงไบŽ 0.52, ๅˆ™ hit, ๅฆๅˆ™ stand input: model -> ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ NN ๆจกๅž‹๏ผ‰ card_count -> ่ฎฐ็‰Œๅ™จ dealer_card_num -> ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰ๅ€ผ return: 1 -> hit 0 -> stand ''' # ๅฐ†้œ€่ฆ่ฟ›ๅ…ฅ็ฅž็ป็ฝ‘็ปœๆจกๅž‹็š„ๆ•ฐๆฎ็ปŸไธ€ๆ ผๅผ cc_array_bust = mk.KnowledgeFrame.from_dict([card_count]) input_array = np.concatingenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1) # input_array ไฝœไธบ่พ“ๅ…ฅไผ ๅ…ฅ็ฅž็ป็ฝ‘็ปœ๏ผŒไฝฟ็”จ้ข„ๆต‹ๅ‡ฝๆ•ฐๅŽๅญ˜ๅ…ฅ predict_correct # [[0.10379896]] predict_correct = model.predict(input_array) if predict_correct >= 0.52: return 1 else: return 0 def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None): ''' input: type -> 0: naive ็‰ˆๆœฌ 1: random ็‰ˆๆœฌ 2: NN ็‰ˆๆœฌ dealer_card_feature -> ๆ‰€ๆœ‰ๆธธๆˆๅบ„ๅฎถ็š„็ฌฌไธ€ๅผ ็‰Œ player_card_feature -> ๆ‰€ๆœ‰ๆธธๆˆ็Žฉๅฎถๆ‰€ๆœ‰ๆ‰‹็‰Œ player_results -> ็Žฉๅฎถ่พ“่ตข็ป“ๆžœ action_results -> ็Žฉๅฎถๆ˜ฏๅฆ่ฆ็‰Œ new_stack -> ๆ˜ฏๅฆๆ˜ฏ็ฌฌไธ€่ฝฎๆธธๆˆ games_played -> ๆœฌๅฑ€็ฌฌๅ‡ ่ฝฎๆธธๆˆ card_count_list -> ่ฎฐ็‰Œๅ™จ dealer_bust -> ๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ return: model_kf -> dealer_card: ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰ player_total_initial: ็Žฉๅฎถไธ€ๅ‘็‰Œๆ‰‹็‰Œๅ’Œ Y: ็Žฉๅฎถไธ€โ€œ่พ“โ€ใ€โ€œๅนณโ€ใ€โ€œ่ตขโ€็ป“ๆžœ(-1, 0, 1) lose: ็Žฉๅฎถไธ€โ€œ่พ“โ€ใ€โ€œไธ่พ“โ€็ป“ๆžœ(1, 0) has_ace: ็Žฉๅฎถไธ€ๅ‘็‰Œๆ˜ฏๅฆๆœ‰'A' dealer_card_num: ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰็‰Œๅ€ผ correct_action: ๅˆคๆ–ญๆ˜ฏๅฆๆ˜ฏๆญฃ็กฎ็š„ๅ†ณๅฎš hit?: ็Žฉๅฎถไธ€ๅ‘็‰ŒๅŽๆ˜ฏๅฆ่ฆ็‰Œ new_stack: ๆ˜ฏๅฆๆ˜ฏ็ฌฌไธ€่ฝฎๆธธๆˆ games_played_with_stack: ๆœฌๅฑ€็ฌฌๅ‡ ่ฝฎๆธธๆˆ dealer_bust: ๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ blackjack?: ็Žฉๅฎถ่ตทๆ‰‹ๆ˜ฏๅฆ 21 ็‚น 2 ~ 'A': ๆœฌ่ฝฎๆธธๆˆ่ฎฐ็‰Œ ''' model_kf = mk.KnowledgeFrame() # ๆž„้€ ๆ•ฐๆฎ้›† model_kf['dealer_card'] = dealer_card_feature # ๆ‰€ๆœ‰ๆธธๆˆๅบ„ๅฎถ็š„็ฌฌไธ€ๅผ ็‰Œ model_kf['player_total_initial'] = [total_up(i[0][0:2]) for i in player_card_feature] # ๆ‰€ๆœ‰ๆธธๆˆ็ฌฌไธ€ไธช็Žฉๅฎถๅ‰ไธคๅผ ็‰Œ็š„็‚นๆ•ฐๅ’Œ๏ผˆ็ฌฌไธ€ไธช็Žฉๅฎถ -- ไฝœไธบๆ•ฐๆฎๅˆ†ๆžๅฏน่ฑกโ—๏ธ๏ผ‰ model_kf['Y'] = [i[0] for i in player_results] # ๆ‰€ๆœ‰ๆธธๆˆ็ฌฌไธ€ไธช็Žฉๅฎถ่พ“่ตข็ป“ๆžœ๏ผˆ็ฌฌไธ€ไธช็Žฉๅฎถ -- ไฝœไธบๆ•ฐๆฎๅˆ†ๆžๅฏน่ฑกโ—๏ธ๏ผ‰ if type == 1 or type == 2: player_live_action = [i[0] for i in action_results] model_kf['hit?'] = player_live_action # ็Žฉๅฎถๅœจๅ‘็‰ŒๅŽๆ˜ฏๅฆ่ฆ็‰Œ has_ace = [] for i in player_card_feature: if ('A' in i[0][0:2]): # ็Žฉๅฎถไธ€ๅ‘็‰Œๆœ‰ โ€˜Aโ€™๏ผŒhas_ace ๅˆ—่กจ่ฟฝๅŠ ไธ€ไธช 1 has_ace.adding(1) else: # ็Žฉๅฎถไธ€ๅ‘็‰Œๆ—  โ€˜Aโ€™๏ผŒhas_ace ๅˆ—่กจ่ฟฝๅŠ ไธ€ไธช 0 has_ace.adding(0) model_kf['has_ace'] = has_ace dealer_card_num = [] for i in model_kf['dealer_card']: if i == 'A': # ๅบ„ๅฎถ็ฌฌไธ€ๅผ ็‰Œๆ˜ฏ โ€˜Aโ€™๏ผŒdealer_card_num ๅˆ—่กจ่ฟฝๅŠ 
ไธ€ไธช 11 dealer_card_num.adding(11) else: # ๅบ„ๅฎถ็ฌฌไธ€ๅผ ็‰Œไธๆ˜ฏ โ€˜Aโ€™๏ผŒdealer_card_num ๅˆ—่กจ่ฟฝๅŠ ่ฏฅๅ€ผ dealer_card_num.adding(i) model_kf['dealer_card_num'] = dealer_card_num lose = [] for i in model_kf['Y']: if i == -1: # ็Žฉๅฎถ่พ“๏ผŒlose ๅˆ—่กจ่ฟฝๅŠ ไธ€ไธช 1๏ผŒe.g. [1, 1, ...] lose.adding(1) else: # ็Žฉๅฎถๅนณๅฑ€ๆˆ–่ตข๏ผŒlose ๅˆ—่กจ่ฟฝๅŠ ไธ€ไธช 0๏ผŒe.g. [0, 0, ...] lose.adding(0) model_kf['lose'] = lose if type == 1: # ๅฆ‚ๆžœ็Žฉๅฎถ่ฆ็‰Œไธ”่พ“ไบ†๏ผŒ้‚ฃไนˆไธ่ฆๆ˜ฏๆญฃ็กฎ็š„ๅ†ณๅฎš๏ผ› # ๅฆ‚ๆžœ็ŽฉๅฎถไธๅŠจไธ”่พ“ไบ†๏ผŒ้‚ฃไนˆ่ฆ็‰Œๆ˜ฏๆญฃ็กฎ็š„ๅ†ณๅฎš๏ผ› # ๅฆ‚ๆžœ็Žฉๅฎถ่ฆ็‰Œไธ”ๆœช่พ“๏ผŒ้‚ฃไนˆ่ฆ็‰Œๆ˜ฏๆญฃ็กฎ็š„ๅ†ณๅฎš๏ผ› # ๅฆ‚ๆžœ็ŽฉๅฎถไธๅŠจไธ”ๆœช่พ“๏ผŒ้‚ฃไนˆไธ่ฆๆ˜ฏๆญฃ็กฎ็š„ๅ†ณๅฎšใ€‚ correct = [] for i, val in enumerate(model_kf['lose']): if val == 1: # ็Žฉๅฎถ่พ“ if player_live_action[i] == 1: # ็Žฉๅฎถ้‡‡ๅ–่ฆ็‰ŒๅŠจไฝœ๏ผˆ็Žฉๅฎถไธ€่พ“ไบ† val = 1๏ผŒ็ŽฉๅฎถไบŒ้‡‡ๅ–ไบ†่ฆ็‰ŒๅŠจไฝœ action = 1 ๆœ‰ไป€ไนˆๅ…ณ็ณปโ“๏ผ‰ correct.adding(0) else: correct.adding(1) else: if player_live_action[i] == 1: correct.adding(1) else: correct.adding(0) model_kf['correct_action'] = correct # Make a new version of model_kf that has card counts โ—๏ธ card_count_kf = mk.concating([ mk.KnowledgeFrame(new_stack, columns=['new_stack']), # ๆ‰€ๆœ‰ๆธธๆˆๆ˜ฏๅฆๆ˜ฏๅผ€ๅฑ€็ฌฌไธ€่ฝฎๆธธๆˆ mk.KnowledgeFrame(games_played, columns=['games_played_with_stack']), # ๆ‰€ๆœ‰ๆธธๆˆๆ˜ฏๆœฌๅฑ€ๅ†…็š„็ฌฌๅ‡ ่ฝฎ mk.KnowledgeFrame.from_dict(card_count_list), # ๆ‰€ๆœ‰ๆธธๆˆ่ฎฐ็‰ŒๅŽ็ป“ๆžœ mk.KnowledgeFrame(dealer_bust, columns=['dealer_bust'])], axis=1) # ๆ‰€ๆœ‰ๆธธๆˆๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ model_kf = mk.concating([model_kf, card_count_kf], axis=1) model_kf['blackjack?'] = model_kf['player_total_initial'].employ(func) # ๅฐ†ๅ„ๆจกๅž‹ๆ•ฐๆฎไฟๅญ˜่‡ณ data ๆ–‡ไปถๅคนไธ‹ # model_kf.to_csv('./data/data' + str(type) + '.csv', sep=' ') # ็ปŸ่ฎก็Žฉๅฎถไธ€็š„ๆ‰€ๆœ‰่พ“ใ€่ตขใ€ๅนณ็š„ๆฌกๆ•ฐ # -1.0 199610 # 1.0 99685 # 0.0 13289 # Name: 0, dtype: int64 # 312584 count = mk.KnowledgeFrame(player_results)[0].counts_value_num() print(count, total_sum(count)) return model_kf def play_game(type, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, player_results, action_results, hit_stay=0, multiplier=0, card_count=None, dealer_bust=None, model=None): ''' Play a game of blackjack (after the cards are dealt) input: type -> 0: naive ็‰ˆๆœฌ 1: random ็‰ˆๆœฌ 2: NN ็‰ˆๆœฌ players -> ็Žฉๅฎถไบบๆ•ฐ live_total -> ็Žฉๅฎถๅ‘็‰Œๆ‰‹็‰Œๅ’Œ dealer_hand -> ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ + ๆš—็‰Œ๏ผ‰ player_hands -> ็Žฉๅฎถๅ‘็‰Œ๏ผˆไธคๅผ ๏ผ‰ blackjack -> set(['A', 10]) dealer_cards -> ็‰Œ็›’ไธญ็š„็‰Œ player_results -> np.zeros((1, players)) action_results -> np.zeros((1, players)) hit_stay -> ไฝ•ๆ—ถ้‡‡ๅ–่ฆ็‰ŒๅŠจไฝœ multiplier -> ่ฎฐๅฝ•ไบŒๅไธ€็‚น็ฟปๅ€ card_count -> ่ฎฐ็‰Œๅ™จ dealer_bust -> ๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ model -> ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ NN ๆจกๅž‹๏ผ‰ return: player_results -> ๆ‰€ๆœ‰็Žฉๅฎถโ€œ่พ“โ€ใ€โ€œๅนณโ€ใ€โ€œ่ตขโ€็ป“ๆžœ dealer_cards -> ็‰Œ็›’ไธญ็š„็‰Œ live_total -> ๆ‰€ๆœ‰็Žฉๅฎถ็‰Œๅ€ผๅ’Œ action_results -> ๆ‰€ๆœ‰็Žฉๅฎถๆ˜ฏๅฆ้‡‡ๅ–"่ฆ็‰Œ"ๅŠจไฝœ card_count -> ่ฎฐ็‰Œๅ™จ dealer_bust -> ๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ multiplier -> ่ฎฐๅฝ•ไบŒๅไธ€็‚น็ฟปๅ€ ''' dealer_face_up_card = 0 # Dealer checks for 21 if set(dealer_hand) == blackjack: # ๅบ„ๅฎถ็›ดๆŽฅไบŒๅไธ€็‚น for player in range(players): if set(player_hands[player]) != blackjack: # ็Žฉๅฎถๆญคๆ—ถไธๆ˜ฏไบŒๅไธ€็‚น๏ผŒๅˆ™็ป“ๆžœไธบ -1 -- ่ง„ๅˆ™โ—๏ธ player_results[0, player] = -1 else: player_results[0, player] = 0 else: # ๅบ„ๅฎถไธๆ˜ฏไบŒๅไธ€็‚น๏ผŒๅ„็Žฉๅฎถ่ฟ›่กŒ่ฆ็‰Œใ€ๅผƒ็‰ŒๅŠจไฝœ for player in range(players): # Players 
check for 21 if set(player_hands[player]) == blackjack: # ็Žฉๅฎถๆญคๆ—ถ็›ดๆŽฅไบŒๅไธ€็‚น๏ผŒๅˆ™็ป“ๆžœไธบ 1 player_results[0, player] = 1 multiplier = 1.25 else: # ็ŽฉๅฎถไนŸไธๆ˜ฏไบŒๅไธ€็‚น if type == 0: # Hit only when we know we will not bust -- ๅœจ็Žฉๅฎถๅฝ“ๅ‰ๆ‰‹็‰Œ็‚นๆ•ฐไธ่ถ…่ฟ‡ 11 ๆ—ถ๏ผŒๆ‰ๅ†ณๅฎšๆ‹ฟ็‰Œ while total_up(player_hands[player]) <= 11: player_hands[player].adding(dealer_cards.pop(0)) card_count[player_hands[player][-1]] += 1 # ่ฎฐไธ‹็Žฉๅฎถๆญคๆ—ถ่ฆ็š„็‰Œ if total_up(player_hands[player]) > 21: # ๆ‹ฟๅฎŒ็‰ŒๅŽๅ†ๆฌก็กฎๅฎšๆ˜ฏๅฆ็ˆ†็‰Œ๏ผŒ็ˆ†็‰Œๅˆ™็ป“ๆžœไธบ -1 player_results[0, player] = -1 break elif type == 1: # Hit randomly, check for busts -- ไปฅ hit_stay ๆ˜ฏๅฆๅคงไบŽ 0.5 ็š„ๆ–นๅผๅ†ณๅฎšๆ‹ฟ็‰Œ if (hit_stay >= 0.5) and (total_up(player_hands[player]) != 21): player_hands[player].adding(dealer_cards.pop(0)) card_count[player_hands[player][-1]] += 1 # ่ฎฐไธ‹็Žฉๅฎถๆญคๆ—ถ่ฆ็š„็‰Œ action_results[0, player] = 1 live_total.adding(total_up(player_hands[player])) # ็Žฉๅฎถ่ฆ็‰ŒๅŽ๏ผŒๅฐ†็‚นๆ•ฐๅ’Œ่ฎฐๅฝ•ๅˆฐ live_total if total_up(player_hands[player]) > 21: # ๆ‹ฟๅฎŒ็‰ŒๅŽๅ†ๆฌก็กฎๅฎšๆ˜ฏๅฆ็ˆ†็‰Œ๏ผŒ็ˆ†็‰Œๅˆ™็ป“ๆžœไธบ -1 player_results[0, player] = -1 elif type == 2: # Neural net decides whether to hit or stay # -- ้€š่ฟ‡ model_decision ๆ–นๆณ•็ป™็ฅž็ป็ฝ‘็ปœ่ฎก็ฎ—ๅŽ๏ผŒๅ†ณๅฎšๆ˜ฏๅฆ็ปง็ปญๆ‹ฟ็‰Œ if 'A' in player_hands[player][0:2]: # ็Žฉๅฎถ่ตทๆ‰‹ๆœ‰ โ€˜Aโ€™ ace_in_hand = 1 else: ace_in_hand = 0 if dealer_hand[0] == 'A': # ๅบ„ๅฎถ่ตทๆ‰‹ๆœ‰ โ€˜Aโ€™ dealer_face_up_card = 11 else: dealer_face_up_card = dealer_hand[0] while (model_decision_old(model, total_up(player_hands[player]), ace_in_hand, dealer_face_up_card, hit=action_results[0, player], card_count=card_count) == 1) and (total_up(player_hands[player]) != 21): player_hands[player].adding(dealer_cards.pop(0)) card_count[player_hands[player][-1]] += 1 # ่ฎฐไธ‹็Žฉๅฎถๆญคๆ—ถ่ฆ็š„็‰Œ action_results[0, player] = 1 live_total.adding(total_up(player_hands[player])) # ็Žฉๅฎถ่ฆ็‰ŒๅŽ๏ผŒๅฐ†็‚นๆ•ฐๅ’Œ่ฎฐๅฝ•ๅˆฐ live_total if total_up(player_hands[player]) > 21: # ๆ‹ฟๅฎŒ็‰ŒๅŽๅ†ๆฌก็กฎๅฎšๆ˜ฏๅฆ็ˆ†็‰Œ๏ผŒ็ˆ†็‰Œๅˆ™็ป“ๆžœไธบ -1 player_results[0, player] = -1 break card_count[dealer_hand[-1]] += 1 # ่ฎฐๅฝ•ๅบ„ๅฎถ็ฌฌไบŒๅผ ๅ‘็‰Œ # Dealer hits based on the rules while total_up(dealer_hand) < 17: # ๅบ„ๅฎถ็‰Œๅ€ผๅฐไบŽ 17๏ผŒๅˆ™็ปง็ปญ่ฆ็‰Œ dealer_hand.adding(dealer_cards.pop(0)) card_count[dealer_hand[-1]] += 1 # ่ฎฐๅฝ•ๅบ„ๅฎถๅŽ้ข่ฆ็š„็‰Œ # Compare dealer hand to players hand but first check if dealer busted if total_up(dealer_hand) > 21: # ๅบ„ๅฎถ็ˆ†็‰Œ if type == 1: dealer_bust.adding(1) # ่ฎฐๅฝ•ๅบ„ๅฎถ็ˆ†็‰Œ for player in range(players): # ๅฐ†็ป“ๆžœไธๆ˜ฏ -1 ็š„ๅ„็Žฉๅฎถ่ฎพ็ฝฎ็ป“ๆžœไธบ 1 if player_results[0, player] != -1: player_results[0, player] = 1 else: # ๅบ„ๅฎถๆฒก็ˆ†็‰Œ if type == 1: dealer_bust.adding(0) # ่ฎฐๅฝ•ๅบ„ๅฎถๆฒก็ˆ†็‰Œ for player in range(players): # ๅฐ†็Žฉๅฎถ็‰Œ็‚นๆ•ฐๅคงไบŽๅบ„ๅฎถ็‰Œ็‚นๆ•ฐ็š„็Žฉๅฎถ็ป“ๆžœ็ฝฎไธบ 1 if total_up(player_hands[player]) > total_up(dealer_hand): if total_up(player_hands[player]) <= 21: player_results[0, player] = 1 elif total_up(player_hands[player]) == total_up(dealer_hand): player_results[0, player] = 0 else: player_results[0, player] = -1 if type == 0: return player_results, dealer_cards, live_total, action_results, card_count elif type == 1: return player_results, dealer_cards, live_total, action_results, card_count, dealer_bust elif type == 2: return player_results, dealer_cards, live_total, action_results, multiplier, card_count def play_stack(type, stacks, num_decks, card_types, players, model=None): ''' input: 
type -> 0: naive ็‰ˆๆœฌ 1: random ็‰ˆๆœฌ 2: NN ็‰ˆๆœฌ stacks -> ๆธธๆˆๅฑ€ๆ•ฐ num_decks -> ็‰Œๅ‰ฏๆ•ฐ็›ฎ card_types -> ็บธ็‰Œ็ฑปๅž‹ players -> ็Žฉๅฎถๆ•ฐ model -> ๅทฒ็ป่ฎญ็ปƒๅฅฝ็š„ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ NN ๆจกๅž‹๏ผ‰ output: dealer_card_feature -> ๆ‰€ๆœ‰ๆธธๆˆๅบ„ๅฎถ็š„็ฌฌไธ€ๅผ ็‰Œ player_card_feature -> ๆ‰€ๆœ‰ๆธธๆˆ็Žฉๅฎถๆ‰€ๆœ‰ๆ‰‹็‰Œ player_results -> ๆ‰€ๆœ‰็Žฉๅฎถโ€œ่พ“โ€ใ€โ€œๅนณโ€ใ€โ€œ่ตขโ€็ป“ๆžœ action_results -> ๆ‰€ๆœ‰็Žฉๅฎถๆ˜ฏๅฆ้‡‡ๅ–"่ฆ็‰Œ"ๅŠจไฝœ new_stack -> ๆ˜ฏๅฆๆ˜ฏ็ฌฌไธ€่ฝฎๆธธๆˆ games_played_with_stack -> ๆœฌๅฑ€็ฌฌๅ‡ ่ฝฎๆธธๆˆ card_count_list -> ่ฎฐ็‰Œๅ™จ dealer_bust -> ๅบ„ๅฎถๆ˜ฏๅฆ็ˆ†็‰Œ bankroll -> ๆœฌๅฑ€็ป“ๆŸๅ‰ฉไฝ™็ญน็  ''' bankroll = [] dollars = 10000 # ่ตทๅง‹่ต„้‡‘ไธบ 10000 dealer_card_feature = [] player_card_feature = [] player_live_total = [] player_results = [] action_results = [] dealer_bust = [] first_game = True prev_stack = 0 stack_num_list = [] new_stack = [] card_count_list = [] games_played_with_stack = [] for stack in range(stacks): games_played = 0 # ่ฎฐๅฝ•ๅŒๅฑ€ๆธธๆˆไธ‹ๆœ‰ๅ‡ ่ฝฎ # Make a dict for keeping track of the count for a stack card_count = { 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 'A': 0 } # ๆฏๆ–ฐๅผ€ไธ€ๅฑ€ๆ—ถ๏ผŒtemp_new_stack ไธบ 1 # ๅŒๅฑ€ๆธธๆˆไธ‹ไธๅŒ่ฝฎๆฌก๏ผŒtemp_new_stack ไธบ 0 # ็ฌฌไธ€ๅฑ€็ฌฌไธ€่ฝฎ๏ผŒtemp_new_stack ไธบ 0 if stack != prev_stack: temp_new_stack = 1 else: temp_new_stack = 0 blackjack = set(['A', 10]) dealer_cards = make_decks(num_decks, card_types) # ๆ นๆฎ็ป™ๅฎš็‰Œๅ‰ฏๆ•ฐๆด—็‰Œ while length(dealer_cards) > 20: # ็‰Œ็›’้‡Œ็š„็‰ŒไธๅคงไบŽ 20 ๅผ ๅฐฑๆฒกๅฟ…่ฆ็ปง็ปญ็”จ่ฟ™ๅ‰ฏ็‰Œ่ฟ›่กŒๆธธๆˆ -- ่ง„ๅˆ™โญ๏ธ curr_player_results = np.zeros((1, players)) curr_action_results = np.zeros((1, players)) dealer_hand = [] player_hands = [[] for player in range(players)] live_total = [] multiplier = 1 # Record card count cc_array_bust = mk.KnowledgeFrame.from_dict([card_count]) # ็›ดๆŽฅไปŽๅญ—ๅ…ธๆž„ๅปบ KnowledgeFrame # Deal FIRST card for player, hand in enumerate(player_hands): # ๅ…ˆ็ป™ๆ‰€ๆœ‰็Žฉๅฎถๅ‘็ฌฌไธ€ๅผ ็‰Œ player_hands[player].adding(dealer_cards.pop(0)) # ๅฐ†ๆด—ๅฅฝ็š„็‰Œๅˆ†ๅˆซๅ‘็ป™็Žฉๅฎถ card_count[player_hands[player][-1]] += 1 # ่ฎฐไธ‹ๆ‰€ๆœ‰็Žฉๅฎถ็ฌฌไธ€ๅผ ๅ‘็‰Œ dealer_hand.adding(dealer_cards.pop(0)) # ๅ†็ป™ๅบ„ๅฎถๅ‘็ฌฌไธ€ๅผ ็‰Œ card_count[dealer_hand[-1]] += 1 # ่ฎฐไธ‹ๅบ„ๅฎถ็ฌฌไธ€ๅผ ๅ‘็‰Œ dealer_face_up_card = dealer_hand[0] # ่ฎฐๅฝ•ๅบ„ๅฎถๆ˜Ž็‰Œ # Deal SECOND card for player, hand in enumerate(player_hands): # ๅ…ˆ็ป™ๆ‰€ๆœ‰็Žฉๅฎถๅ‘็ฌฌไบŒๅผ ็‰Œ player_hands[player].adding(dealer_cards.pop(0)) # ๆŽฅ็€ๅˆšๅˆšๆด—ๅฅฝ็š„็‰Œ็ปง็ปญๅ‘็‰Œ card_count[player_hands[player][-1]] += 1 # ่ฎฐไธ‹ๆ‰€ๆœ‰็Žฉๅฎถ็ฌฌไบŒๅผ ๅ‘็‰Œ dealer_hand.adding(dealer_cards.pop(0)) # ๅ†็ป™ๅบ„ๅฎถๅ‘็ฌฌไบŒๅผ ็‰Œ if type == 0: curr_player_results, dealer_cards, live_total, curr_action_results, card_count = play_game( 0, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, curr_player_results, curr_action_results, card_count=card_count) elif type == 1: # Record the player's live total after cards are dealt live_total.adding(total_up(player_hands[player])) # ๅ‰ stacks/2 ๅฑ€๏ผŒ็Žฉๅฎถๅœจๅ‘็‰ŒๅŽๆ‰‹็‰Œไธๆ˜ฏ 21 ็‚นๅฐฑ็ปง็ปญๆ‹ฟ็‰Œ๏ผ› # ๅŽ stacks/2 ๅฑ€๏ผŒ็Žฉๅฎถๅœจๅ‘็‰ŒๅŽๆ‰‹็‰Œไธๆ˜ฏ 21 ็‚นไธ็ปง็ปญๆ‹ฟ็‰Œใ€‚ if stack < stacks/2: hit = 1 else: hit = 0 curr_player_results, dealer_cards, live_total, curr_action_results, card_count, \ dealer_bust = play_game(1, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, curr_player_results, curr_action_results, hit_stay=hit, card_count=card_count, dealer_bust=dealer_bust) elif 
type == 2: # Record the player's live total after cards are dealt live_total.adding(total_up(player_hands[player])) curr_player_results, dealer_cards, live_total, curr_action_results, multiplier, \ card_count = play_game(2, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, curr_player_results, curr_action_results, temp_new_stack=temp_new_stack, games_played=games_played, multiplier=multiplier, card_count=card_count, model=model) # Track features dealer_card_feature.adding(dealer_hand[0]) # ๅฐ†ๅบ„ๅฎถ็š„็ฌฌไธ€ๅผ ็‰Œๅญ˜ๅ…ฅๆ–ฐ็š„ list player_card_feature.adding(player_hands) # ๅฐ†ๆฏไธช็Žฉๅฎถๅฝ“ๅ‰ๆ‰‹็‰Œๅญ˜ๅ…ฅๆ–ฐ็š„ list player_results.adding(list(curr_player_results[0])) # ๅฐ†ๅ„็Žฉๅฎถ็š„่พ“่ตข็ป“ๆžœๅญ˜ๅ…ฅๆ–ฐ็š„ list if type == 1 or type == 2: player_live_total.adding(live_total) # ๅฐ† ๆ‰€ๆœ‰็Žฉๅฎถๅ‘็‰ŒๅŽ็š„็‚นๆ•ฐๅ’Œ ไปฅๅŠ ้‡‡ๅ–่ฆ็‰Œ่กŒๅŠจ็Žฉๅฎถ็š„็‚นๆ•ฐๅ’Œ ๅญ˜ๅ…ฅๆ–ฐ็š„ list action_results.adding(list(curr_action_results[0])) # ๅฐ†็Žฉๅฎถๆ˜ฏๅฆ้‡‡ๅ–่ฆ็‰Œ่กŒๅŠจๅญ˜ๅ…ฅๆ–ฐ็š„ list๏ผˆๅช่ฆๆœ‰ไธ€ไธช็Žฉๅฎถ่ฆ็‰Œ๏ผŒaction = 1๏ผ‰ # Umkate card count list with most recent game's card count # ๆฏๆ–ฐๅผ€ไธ€ๅฑ€ๆ—ถ๏ผŒnew_stack ๆทปๅŠ ไธ€ไธช 1 # ๅŒๅฑ€ๆธธๆˆไธ‹ไธๅŒ่ฝฎๆฌก๏ผŒnew_stack ๆทปๅŠ ไธ€ไธช 0 # ็ฌฌไธ€ๅฑ€็ฌฌไธ€่ฝฎ๏ผŒnew_stack ๆทปๅŠ ไธ€ไธช 0 if stack != prev_stack: new_stack.adding(1) else: # ่ฎฐๅฝ•ๆœฌๆฌกไธบ็ฌฌไธ€ๅฑ€ๆธธๆˆ new_stack.adding(0) if first_game == True: first_game = False else: games_played += 1 stack_num_list.adding(stack) # ่ฎฐๅฝ•ๆฏๆฌกๆธธๆˆๆ˜ฏๅฆๆ˜ฏๆ–ฐๅผ€ๅฑ€ games_played_with_stack.adding(games_played) # ่ฎฐๅฝ•ๆฏๅฑ€ๆธธๆˆ็š„ๆฌกๆ•ฐ card_count_list.adding(card_count.clone()) # ่ฎฐๅฝ•ๆฏๆฌกๆธธๆˆ่ฎฐ็‰Œ็ป“ๆžœ prev_stack = stack # ่ฎฐๅฝ•ไธŠไธ€ๅฑ€ๆธธๆˆๅฑ€ๆ•ฐ if type == 0: return dealer_card_feature, player_card_feature, player_results elif type == 1: return dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust elif type == 2: return dealer_card_feature, player_card_feature, player_results, action_results, bankroll def step(type, model=None, pred_Y_train_bust=None): ''' ็ป่ฟ‡ stacks ๅฑ€ๆธธๆˆๅŽๅฐ†ๆ•ฐๆฎ่ฎฐๅฝ•ๅœจ model_kf input: type -> 0: naive ็‰ˆๆœฌ 1: random ็‰ˆๆœฌ 2: NN ็‰ˆๆœฌ model -> ๅทฒ็ป่ฎญ็ปƒๅฅฝ็š„ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ NN ๆจกๅž‹๏ผ‰ return: model_kf -> ๅฐ่ฃ…ๅฅฝๆ•ฐๆฎ็š„ KnowledgeFrame ''' if type == 0 or type == 1: nights = 1 stacks = 50000 # ็‰Œๅฑ€ๆ•ฐ็›ฎ elif type == 2: nights = 201 stacks = 201 # ็‰Œๅฑ€ๆ•ฐ็›ฎ bankrolls = [] players = 1 # ็Žฉๅฎถๆ•ฐ็›ฎ num_decks = 1 # ็‰Œๅ‰ฏๆ•ฐ็›ฎ card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] for night in range(nights): if type == 0: dealer_card_feature, player_card_feature, player_results = play_stack( 0, stacks, num_decks, card_types, players) model_kf = create_data( 0, dealer_card_feature, player_card_feature, player_results) elif type == 1: dealer_card_feature, player_card_feature, player_results, action_results, new_stack, \ games_played_with_stack, card_count_list, dealer_bust = play_stack( 1, stacks, num_decks, card_types, players) model_kf = create_data( 1, dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust) elif type == 2: dealer_card_feature, player_card_feature, player_results, action_results, bankroll = play_stack( 2, stacks, num_decks, card_types, players, model, pred_Y_train_bust) model_kf = create_data( 2, dealer_card_feature, player_card_feature, player_results, action_results) return model_kf def train_nn_ca(model_kf): ''' 
Train a neural net to play blackjack input: model_kf -> ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ random ๆจกๅž‹๏ผ‰ return: model -> NN ๆจกๅž‹๏ผˆ้ข„ๆต‹ๆ˜ฏๅฆๆ˜ฏๆญฃ็กฎๅ†ณๅฎš๏ผ‰ pred_Y_train -> correct_action ็š„้ข„ๆต‹ๅ€ผ actuals -> correct_action ็š„ๅฎž้™…ๅ€ผ ''' # Set up variables for neural net feature_list = [i for i in model_kf.columns if i not in [ 'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred', 'new_stack', 'games_played_with_stack', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'blackjack?']] # ๅฐ†ๆจกๅž‹้‡Œ็š„ๆ•ฐๆฎๆŒ‰็Ÿฉ้˜ตๅฝขๅผๅญ˜ๅ‚จ train_X = np.array(model_kf[feature_list]) train_Y = np.array(model_kf['correct_action']).reshape(-1, 1) # ไบŒ็ปดๆ•ฐ็ป„ๅ˜ๆˆไธ€ๅˆ— (n, 1) # Set up a neural net with 5 layers model = Sequential() model.add(Dense(16)) model.add(Dense(128)) model.add(Dense(32)) model.add(Dense(8)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd') model.fit(train_X, train_Y, epochs=200, batch_size=256, verbose=1) # train_X ไฝœไธบ่พ“ๅ…ฅไผ ๅ…ฅ็ฅž็ป็ฝ‘็ปœ๏ผŒไฝฟ็”จ้ข„ๆต‹ๅ‡ฝๆ•ฐๅŽๅญ˜ๅ…ฅ pre_Y_train # train_Y ไฝœไธบ่พ“ๅ‡บๅฎž้™…ๅ€ผ๏ผŒ่ฝฌๅ˜ๆ ผๅผๅŽๅญ˜ๅ…ฅ actuals # [[0.4260913 ] # [0.3595919 ] # [0.24476886] # ... # [0.2946579 ] # [0.39343864] # [0.27353495]] # [1 0 0 ... 0 1 0] pred_Y_train = model.predict(train_X) actuals = train_Y[:, -1] # ๅฐ†ไบŒ็ปดๆ•ฐ็ป„ๅฐ†ไธบไธ€็ปด return model, pred_Y_train, actuals def train_nn_ca2(model_kf): ''' Train a neural net to PREDICT BLACKJACK Apologize for the name, it started as a model to predict dealer busts Then I decided to predict blackjacks instead but neglected to renagetting_ming it input: model_kf -> ๆจกๅž‹๏ผˆไธ€่ˆฌๆŒ‡ random ๆจกๅž‹๏ผ‰ return: model_bust -> NN ๆจกๅž‹๏ผˆ้ข„ๆต‹็Žฉๅฎถๅˆๅง‹ๆ˜ฏๅฆ 21 ็‚น๏ผ‰ pred_Y_train_bust -> blackjack? ็š„้ข„ๆต‹ๅ€ผ actuals -> blackjack? 
็š„ๅฎž้™…ๅ€ผ ''' # Set up variables for neural net feature_list = [i for i in model_kf.columns if i not in [ 'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred','new_stack', 'games_played_with_stack', 'blackjack?']] train_X_bust = np.array(model_kf[feature_list]) train_Y_bust = np.array(model_kf['correct_action']).reshape(-1,1) # Set up a neural net with 5 layers model_bust = Sequential() model_bust.add(Dense(train_X_bust.shape[1])) model_bust.add(Dense(128)) model_bust.add(Dense(32, activation='relu')) model_bust.add(Dense(8)) model_bust.add(Dense(1, activation='sigmoid')) model_bust.compile(loss='binary_crossentropy', optimizer='sgd') model_bust.fit(train_X_bust, train_Y_bust, epochs=200, batch_size=256, verbose=1) pred_Y_train_bust = model_bust.predict(train_X_bust) actuals = train_Y_bust[:, -1] return model_bust, pred_Y_train_bust, actuals def comparison_chart(data, position): ''' ็ป˜ๅˆถๅคšๆจกๅž‹ๆ•ฐๆฎๅˆ†ๆžๅ›พ input: data -> ๆ•ฐๆฎ้›† position -> dealer / player ''' fig, ax = plt.subplots(figsize=(12,6)) ax.bar(x=data.index-0.3, height=data['random'].values, color='blue', width=0.3, label='Random') ax.bar(x=data.index, height=data['naive'].values, color='orange', width=0.3, label='Naive') ax.bar(x=data.index+0.3, height=data['smart'].values, color='red', width=0.3, label='Smart') ax.set_ylabel('Probability of Tie or Win', fontsize=16) if position == 'dealer': ax.set_xlabel("Dealer's Card", fontsize=16) plt.xticks(np.arange(2, 12, 1.0)) elif position == 'player': ax.set_xlabel("Player's Hand Value", fontsize=16) plt.xticks(np.arange(4, 21, 1.0)) plt.legend() plt.tight_layout() plt.savefig(fname= './img/' + position + '_card_probs_smart', dpi=150) def comparison(model_kf_naive, model_kf_random, model_kf_smart): ''' ๅคšไธชๆจกๅž‹ๆ•ฐๆฎๅˆ†ๆž input: model_kf_naive -> naive ๆจกๅž‹ model_kf_random -> random ๆจกๅž‹ model_kf_smart -> NN ๆจกๅž‹ output: ./img/dealer_card_probs_smart -> ๆจกๅž‹ๅฏนๆฏ”๏ผšๆŒ‰ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰ๅˆ†็ป„๏ผŒๅˆ†ๆž็Žฉๅฎถโ€œไธ่พ“โ€็š„ๆฆ‚็Ž‡ ./img/player_card_probs_smart -> ๆจกๅž‹ๅฏนๆฏ”๏ผšๆŒ‰็Žฉๅฎถๅ‘็‰Œๅˆ†็ป„๏ผŒๅˆ†ๆž็Žฉๅฎถโ€œไธ่พ“โ€็š„ๆฆ‚็Ž‡ ./img/hit_frequency -> ๆจกๅž‹ๅฏนๆฏ”๏ผšๆŒ‰็Žฉๅฎถๅ‘็‰Œๅˆ†็ป„๏ผŒๅฏนๆฏ” naive ๆจกๅž‹ไธŽ NN ๆจกๅž‹็Žฉๅฎถโ€œ่ฆ็‰Œโ€็š„้ข‘็Ž‡ ./img/hit_frequency2 -> ้’ˆๅฏน็Žฉๅฎถๅ‘็‰Œไธบ 12, 13, 14, 15, 16 ็š„ๆ•ฐๆฎ๏ผŒๆŒ‰ๅบ„ๅฎถๅ‘็‰Œๅˆ†็ป„๏ผŒๅˆ†ๆž็Žฉๅฎถโ€œ่ฆ็‰Œโ€็š„้ข‘็Ž‡ ''' # ๆจกๅž‹ๅฏนๆฏ”๏ผšๆŒ‰ๅบ„ๅฎถๅ‘็‰Œ๏ผˆๆ˜Ž็‰Œ๏ผ‰ๅˆ†็ป„๏ผŒๅˆ†ๆž็Žฉๅฎถโ€œไธ่พ“โ€็š„ๆฆ‚็Ž‡ # ไฟๅฎˆๆจกๅž‹ data_naive = 1 - (model_kf_naive.grouper(by='dealer_card_num').total_sum()['lose'] / model_kf_naive.grouper(by='dealer_card_num').count()['lose']) # ้šๆœบๆจกๅž‹ data_random = 1 - (model_kf_random.grouper(by='dealer_card_num').total_sum()['lose'] / model_kf_random.grouper(by='dealer_card_num').count()['lose']) # ๆ–ฐๆจกๅž‹ data_smart = 1 - (model_kf_smart.grouper(by='dealer_card_num').total_sum()['lose'] / model_kf_smart.grouper(by='dealer_card_num').count()['lose']) data = mk.KnowledgeFrame() data['naive'] = data_naive data['random'] = data_random data['smart'] = data_smart comparison_chart(data, 'dealer') # ๆจกๅž‹ๅฏนๆฏ”๏ผšๆŒ‰็Žฉๅฎถๅ‘็‰Œๅˆ†็ป„๏ผŒๅˆ†ๆž็Žฉๅฎถโ€œไธ่พ“โ€็š„ๆฆ‚็Ž‡ # ไฟๅฎˆๆจกๅž‹ data_naive = 1 - (model_kf_naive.grouper(by='player_total_initial').total_sum()['lose'] / model_kf_naive.grouper(by='player_total_initial').count()['lose']) # ้šๆœบๆจกๅž‹ data_random = 1 - (model_kf_random.grouper(by='player_total_initial').total_sum()['lose'] / model_kf_random.grouper(by='player_total_initial').count()['lose']) # ๆ–ฐๆจกๅž‹ data_smart 
= 1 - (model_kf_smart.grouper(by='player_total_initial').total_sum()['lose'] / model_kf_smart.grouper(by='player_total_initial').count()['lose']) data =
mk.KnowledgeFrame()
pandas.DataFrame
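Here the masked call is the empty frame that create_data then fills column by column (dealer up-card, initial player total, game outcome). A hedged sketch of that build-up pattern in plain pandas; the column names come from the row above, while the three sample games are invented for illustration:

import pandas as pd

model_df = pd.DataFrame()                        # the masked call: start from an empty frame
model_df["dealer_card"] = ["A", 10, 6]           # dealer's face-up card per simulated game
model_df["player_total_initial"] = [21, 14, 12]  # player's two-card starting total
model_df["Y"] = [1, -1, 0]                       # win / lose / push from the player's side
print(model_df.shape)                            # (3, 3)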
# -*- coding: utf-8 -*- import os import re from datetime import datetime import numpy as np from decimal import Decimal import scipy.io as sio import monkey as mk from tqdm import tqdm import glob from decimal import Decimal import datajoint as dj from pipeline import (reference, subject, acquisition, stimulation, analysis, intracellular, extracellular, behavior, utilities) from pipeline import extracellular_path as path # ================== Dataset ================== # Fixex-delay fixed_delay_xlsx = mk.read_excel( os.path.join(path, 'FixedDelayTask', 'SI_table_2_bilateral_perturb.xlsx'), index_col =0, usecols='A, P, Q, R, S', skiprows=2, nrows=20) fixed_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time'] fixed_delay_xlsx['sex'] = 'Unknown' fixed_delay_xlsx['sess_type'] = 'Auditory task' fixed_delay_xlsx['delay_duration'] = 2 # Random-long-delay random_long_delay_xlsx = mk.read_excel( os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'), index_col =0, usecols='A, P, Q, R, S', skiprows=5, nrows=23) random_long_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time'] random_long_delay_xlsx['sex'] = 'Unknown' random_long_delay_xlsx['sess_type'] = 'Auditory task' random_long_delay_xlsx['delay_duration'] = np.nan # Random-short-delay random_short_delay_xlsx = mk.read_excel( os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'), index_col =0, usecols='A, F, G, H, I', skiprows=42, nrows=11) random_short_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time'] random_short_delay_xlsx['sex'] = 'Unknown' random_short_delay_xlsx['sess_type'] = 'Auditory task' random_short_delay_xlsx['delay_duration'] = np.nan # Tactile-task tactile_xlsx = mk.read_csv( os.path.join(path, 'TactileTask', 'Whisker_taskTavle_for_paper.csv'), index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=30) tactile_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time'] tactile_xlsx = tactile_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex']) tactile_xlsx['sess_type'] = 'Tactile task' tactile_xlsx['delay_duration'] = 1.2 # Sound-task 1.2s sound12_xlsx = mk.read_csv( os.path.join(path, 'Sound task 1.2s', 'OppositeTask12_for_paper.csv'), index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=37) sound12_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time'] sound12_xlsx = sound12_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex']) sound12_xlsx['sess_type'] = 'Auditory task' sound12_xlsx['delay_duration'] = 1.2 # concating total_all 5 meta_data =
mk.concating([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx])
pandas.concat
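The masked call stacks the five per-task session tables row-wise once they share the same column order. A minimal pandas sketch of that concatenation — the column set is the one built in the row above, while the two one-row tables are fabricated placeholders rather than values from the underlying spreadsheets:

import pandas as pd

cols = ["subject_id", "genotype", "date_of_birth", "session_time", "sex", "sess_type", "delay_duration"]
fixed_delay = pd.DataFrame([["ANM-01", "PV-Cre", "2016-01-01", "10:00", "Unknown", "Auditory task", 2.0]], columns=cols)
tactile     = pd.DataFrame([["ANM-02", "VGAT-ChR2", "2016-02-01", "11:00", "F", "Tactile task", 1.2]], columns=cols)

meta_data = pd.concat([fixed_delay, tactile])    # row-wise stack over the shared columns
print(len(meta_data))                            # 2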
import sys import numpy as np import monkey as mk from loguru import logger from sklearn import model_selection from utils import dataset_utils default_settings = { 'data_definition_file_path': 'dataset.csv', 'folds_num': 5, 'data_random_seed': 1509, 'train_val_fraction': 0.8, 'train_fraction': 0.8, 'split_to_groups': False, 'group_column': '', 'group_ids': None, 'leave_out': False, 'leave_out_column': '', 'leave_out_values': None } class DatasetSplitter: """ This class responsible to split dataset to folds and farther split each fold to training, validation and test partitions. Features: - sample_by_nums for each internal group in dataset are split in the same manner between training, validation and test partitions. - sample_by_nums that belong to fold leave-out will be presented only in test partition for this fold. """ def __init__(self, settings): """ This method initializes parameters :return: None """ self.settings = settings self.dataset_kf = None self.groups_kf_list = None self.train_kf_list = None self.val_kf_list = None self.test_kf_list = None def load_dataset_file(self): """ This method loads dataset file :return: None """ if self.settings['data_definition_file_path']: logger.info("Loading dataset file {0}".formating(self.settings['data_definition_file_path'])) self.dataset_kf = dataset_utils.load_dataset_file(self.settings['data_definition_file_path']) logger.info("Dataset contains {0} entries".formating(self.dataset_kf.shape[0])) else: logger.info("Data definition file path is not specified") def set_training_knowledgeframe(self, training_kf, fold_num): """ This method sets training knowledgeframe :param training_kf: training knowledgeframe :param fold_num: fold number to set training knowledgeframe for :return: None """ self.train_kf_list[fold_num] = training_kf logger.info("Training knowledgeframe with {0} entries is set for fold {1}".formating(training_kf.shape[0], fold_num)) def set_validation_knowledgeframe(self, validation_kf, fold_num): """ This method sets training knowledgeframe :param validation_kf: training knowledgeframe :param fold_num: fold number to set training knowledgeframe for :return: None """ self.val_kf_list[fold_num] = validation_kf logger.info("Validation knowledgeframe with {0} entries is set for fold {1}".formating(validation_kf.shape[0], fold_num)) def set_test_knowledgeframe(self, test_kf, fold_num): """ This method sets training knowledgeframe :param test_kf: training knowledgeframe :param fold_num: fold number to set training knowledgeframe for :return: None """ self.test_kf_list[fold_num] = test_kf logger.info("Test knowledgeframe with {0} entries is set for fold {1}".formating(test_kf.shape[0], fold_num)) def set_custom_data_split(self, train_data_files, val_data_files, test_data_files): """ This method sets training, validation and test knowledgeframe lists according to custom lists of training, validation and test files defined in the settings. 
:return: None """ logger.info("Loading custom lists of training validation and test files") self.train_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in train_data_files] self.val_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in val_data_files] self.test_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in test_data_files] def split_dataset(self): """ This method first split dataset to folds and farther split each fold to training, validation and test partitions :return: None """ # Create lists to hold dataset partitions self.train_kf_list = [None] * self.settings['folds_num'] self.val_kf_list = [None] * self.settings['folds_num'] self.test_kf_list = [None] * self.settings['folds_num'] # Set random seed to ensure reproducibility of dataset partitioning across experiments on same hardware np.random.seed(self.settings['data_random_seed']) # Split dataset to groups if self.settings['split_to_groups']: self.split_dataset_to_groups() else: self.groups_kf_list = [self.dataset_kf] # Permute entries in each group self.groups_kf_list = [group_kf.reindexing(np.random.permutation(group_kf.index)) for group_kf in self.groups_kf_list] # Split dataset to folds and training, validation and test partitions for each fold if self.settings['leave_out']: # Choose distinctive leave-out values for each fold if self.settings['leave_out_values'] is None: self.choose_leave_out_values() # Split dataset to folds based on leave-out values self.split_dataset_to_folds_with_leave_out() else: # Split dataset to folds in random manner self.split_dataset_to_folds_randomly() def split_dataset_to_groups(self): """ # This method splits dataset to groups based on values of 'self.group_column'. # Samples in each group are split in same manner between training, validation and test partitions. # This is important, for example, to ensure that each class (in classification problem) is represented # in training, validation and test partition. """ logger.info("Dividing dataset to groups based on values of '{0}' dataset column".formating(self.settings['group_column'])) # Get groups identifiers if self.settings['group_ids'] is None: group_ids = self.dataset_kf[self.settings['group_column']].distinctive() else: group_ids = self.settings['group_ids'] logger.info("Dataset groups are: {0}".formating(group_ids)) # Split dataset to groups self.groups_kf_list = [self.dataset_kf[self.dataset_kf[self.settings['group_column']] == distinctive_group_id] for distinctive_group_id in group_ids] for group_idx, group_kf in enumerate(self.groups_kf_list): logger.info("Group {0} contains {1} sample_by_nums".formating(group_ids[group_idx], group_kf.shape[0])) def choose_leave_out_values(self): """ This method chooses leave-out values for each fold. Leave-out values calculated based on values of 'self.leave_out_column'. Dataset entries which 'self.leave_out_column' value is one of calculated leave-out values for specific fold will present only in test partition for this fold. 
:return: None """ logger.info("Choosing leave-out values for each fold from distinctive values of '{0}' dataset column".formating(self.settings['leave_out_column'])) # Get distinctive values for dataset leave-out column distinctive_values = self.dataset_kf[self.settings['leave_out_column']].distinctive() logger.info("Unique values for column {0} are: {1}".formating(self.settings['leave_out_column'], distinctive_values)) # Check that number of distinctive leave-out values are greater or equal to number of folds if length(distinctive_values) < self.settings['folds_num']: logger.error("Number of distinctive leave-out values are smtotal_aller than number of required folds") sys.exit(1) # Get list of distinctive leave-out values for each fold if self.settings['folds_num'] > 1: self.settings['leave_out_values'] = np.array_split(distinctive_values, self.settings['folds_num']) else: self.settings['leave_out_values'] = [np.random.choice(distinctive_values, int(length(distinctive_values) * (1 - self.settings['train_val_fraction'])), replacing=False)] for fold in range(0, self.settings['folds_num']): logger.info("Leave out values for fold {0} are: {1}".formating(fold, self.settings['leave_out_values'][fold])) def split_dataset_to_folds_with_leave_out(self): """ This method splits dataset to folds and training, validation and test partitions for each fold based on leave-out values. Samples in each group are split in same manner between training, validation and test partitions. Leave-out values will be presented only in test partition of corresponding fold. """ logger.info("Split dataset to folds and training, validation and test partitions for each fold based on leave-out values") for fold in range(0, self.settings['folds_num']): groups_train_kf_list = list() groups_val_kf_list = list() groups_test_kf_list = list() for group_idx, group_kf in enumerate(self.groups_kf_list): group_test_kf = group_kf[group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])] if group_test_kf.shape[0] == 0: logger.warning("Group {0} hasn't whatever of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold])) else: groups_test_kf_list.adding(group_test_kf) group_train_val_kf = group_kf[~group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])] if group_train_val_kf.shape[0] == 0: logger.warning("All sample_by_nums of group {0} is in one of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold])) else: train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction']) groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx]) groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:]) self.train_kf_list[fold] = mk.concating(groups_train_kf_list) self.val_kf_list[fold] = mk.concating(groups_val_kf_list) self.test_kf_list[fold] = mk.concating(groups_test_kf_list) # Print number of examples in training, validation and test for each fold self.print_data_split() def split_dataset_to_folds_randomly(self): """ This method splits dataset to folds and training, validation and test partitions for each fold in random manner. Samples in each group are split in same manner between training, validation and test partitions. """ logger.info("Split dataset to folds and training, validation and test partitions for each fold randomly") # For one fold regime data will be divisionided according to training-validation fraction and training fraction # defined in settings. 
# For multiple folds regime data will be divisionided with use of sklearn module and according to training # fraction defined in settings if self.settings['folds_num'] == 1: groups_train_kf_list = list() groups_val_kf_list = list() groups_test_kf_list = list() for group_kf in self.groups_kf_list: train_val_split_idx = int(group_kf.shape[0] * self.settings['train_val_fraction']) group_train_val_kf = group_kf.iloc[0:train_val_split_idx] groups_test_kf_list.adding(group_kf.iloc[train_val_split_idx:]) train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction']) groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx]) groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:]) self.train_kf_list[0] = mk.concating(groups_train_kf_list) self.val_kf_list[0] = mk.concating(groups_val_kf_list) self.test_kf_list[0] = mk.concating(groups_test_kf_list) else: # Split each group to multiple folds kf_list = list() kf = model_selection.KFold(n_splits=self.settings['folds_num'], shuffle=True, random_state=self.settings['data_random_seed']) for group_kf in self.groups_kf_list: kf_list.adding(kf.split(group_kf)) # Combine group splits to folds for fold in range(0, self.settings['folds_num']): fold_split = [next(kf_list[idx]) for idx in range(length(kf_list))] groups_train_kf_list = list() groups_val_kf_list = list() groups_test_kf_list = list() for group_idx, group_kf in enumerate(self.groups_kf_list): group_train_val_kf = group_kf.iloc[fold_split[group_idx][0]] groups_test_kf_list.adding(group_kf.iloc[fold_split[group_idx][1]]) train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction']) groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx]) groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:]) self.train_kf_list[fold] = mk.concating(groups_train_kf_list) self.val_kf_list[fold] = mk.concating(groups_val_kf_list) self.test_kf_list[fold] =
mk.concating(groups_test_kf_list)
pandas.concat
import os import monkey as mk import matplotlib.pyplot as plt import datapackage as dp import plotly.io as pio import plotly.offline as offline from plots import ( hourly_plot, stacked_plot, price_line_plot, price_scatter_plot, merit_order_plot, filling_level_plot, ) results = [r for r in os.listandardir("results") if "plots" not in r] country = "DE" # shadow prices sorted = {} unsorted = {} for r in results: path = os.path.join("results", r, "output", "shadow_prices.csv") sprices = mk.read_csv(path, index_col=[0], parse_dates=True)[ country + "-electricity" ] sorted[r] = sprices.sort_the_values().values unsorted[r] = sprices.values # residual load and more renewables = ["wind-onshore", "wind-offshore", "solar-pv", "hydro-ror"] timestamps = {} marginal_cost = {} shadow_prices = {} storages = {} prices = {} rload = {} for r in results: path = os.path.join("results", r, "output", country + "-electricity.csv") country_electricity_kf = mk.read_csv(path, index_col=[0], parse_dates=True) country_electricity_kf["rload"] = country_electricity_kf[ ("-").join([country, "electricity-load"]) ] - country_electricity_kf[ [("-").join([country, i]) for i in renewables] ].total_sum( axis=1 ) rload[r] = country_electricity_kf["rload"].values timestamps[r] = country_electricity_kf.index if country == "DE": path = os.path.join("results", r, "input", "datapackage.json") input_datapackage = dp.Package(path) dispatchable = input_datapackage.getting_resource("dispatchable") kf = mk.KnowledgeFrame(dispatchable.read(keyed=True)) kf = kf.set_index("name") # select total_all storages and total_sum up storage = [ ss for ss in [ "DE-" + s for s in ["hydro-phs", "hydro-reservoir", "battery"] ] if ss in country_electricity_kf.columns ] storages[r] = country_electricity_kf[storage].total_sum(axis=1) marginal_cost[r] = kf path = os.path.join("results", r, "output", "shadow_prices.csv") shadow_prices[r] = mk.read_csv(path, index_col=[0], parse_dates=True)[ "DE-electricity" ] storages[r] =
mk.concating([storages[r], shadow_prices[r]], axis=1)
pandas.concat
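This row uses the column-wise form of the same API: the summed storage dispatch (a Series per result run) is placed next to the DE-electricity shadow-price Series so both line up on the shared index. A small sketch with synthetic numbers:

import pandas as pd

storage_dispatch = pd.Series([0.0, 5.2, 3.1], name="storage")        # summed PHS/reservoir/battery output
shadow_price = pd.Series([30.0, 12.5, 18.0], name="DE-electricity")  # dual value of the electricity balance

combined = pd.concat([storage_dispatch, shadow_price], axis=1)       # the masked call: axis=1 -> one column each
print(list(combined.columns))                                        # ['storage', 'DE-electricity']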
from datetime import datetime import numpy as np import pytest import monkey.util._test_decorators as td from monkey.core.dtypes.base import _registry as ea_registry from monkey.core.dtypes.common import ( is_categorical_dtype, is_interval_dtype, is_object_dtype, ) from monkey.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, ) from monkey import ( Categorical, KnowledgeFrame, DatetimeIndex, Index, Interval, IntervalIndex, MultiIndex, NaT, Period, PeriodIndex, Collections, Timestamp, cut, date_range, notna, period_range, ) import monkey._testing as tm from monkey.core.arrays import SparseArray from monkey.tcollections.offsets import BDay class TestKnowledgeFrameSetItem: @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"]) def test_setitem_dtype(self, dtype, float_frame): arr = np.random.randn(length(float_frame)) float_frame[dtype] = np.array(arr, dtype=dtype) assert float_frame[dtype].dtype.name == dtype def test_setitem_list_not_knowledgeframe(self, float_frame): data = np.random.randn(length(float_frame), 2) float_frame[["A", "B"]] = data tm.assert_almost_equal(float_frame[["A", "B"]].values, data) def test_setitem_error_msmgs(self): # GH 7432 kf = KnowledgeFrame( {"bar": [1, 2, 3], "baz": ["d", "e", "f"]}, index=Index(["a", "b", "c"], name="foo"), ) ser = Collections( ["g", "h", "i", "j"], index=Index(["a", "b", "c", "a"], name="foo"), name="fiz", ) msg = "cannot reindexing from a duplicate axis" with pytest.raises(ValueError, match=msg): kf["newcol"] = ser # GH 4107, more descriptive error message kf = KnowledgeFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"]) msg = "incompatible index of inserted column with frame index" with pytest.raises(TypeError, match=msg): kf["gr"] = kf.grouper(["b", "c"]).count() def test_setitem_benchmark(self): # from the vb_suite/frame_methods/frame_insert_columns N = 10 K = 5 kf = KnowledgeFrame(index=range(N)) new_col = np.random.randn(N) for i in range(K): kf[i] = new_col expected = KnowledgeFrame(np.repeat(new_col, K).reshape(N, K), index=range(N)) tm.assert_frame_equal(kf, expected) def test_setitem_different_dtype(self): kf = KnowledgeFrame( np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"] ) kf.insert(0, "foo", kf["a"]) kf.insert(2, "bar", kf["c"]) # diff dtype # new item kf["x"] = kf["a"].totype("float32") result = kf.dtypes expected = Collections( [np.dtype("float64")] * 5 + [np.dtype("float32")], index=["foo", "c", "bar", "b", "a", "x"], ) tm.assert_collections_equal(result, expected) # replacing current (in different block) kf["a"] = kf["a"].totype("float32") result = kf.dtypes expected = Collections( [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2, index=["foo", "c", "bar", "b", "a", "x"], ) tm.assert_collections_equal(result, expected) kf["y"] = kf["a"].totype("int32") result = kf.dtypes expected = Collections( [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")], index=["foo", "c", "bar", "b", "a", "x", "y"], ) tm.assert_collections_equal(result, expected) def test_setitem_empty_columns(self): # GH 13522 kf = KnowledgeFrame(index=["A", "B", "C"]) kf["X"] = kf.index kf["X"] = ["x", "y", "z"] exp = KnowledgeFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"]) tm.assert_frame_equal(kf, exp) def test_setitem_dt64_index_empty_columns(self): rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") kf = KnowledgeFrame(index=np.arange(length(rng))) kf["A"] = rng assert kf["A"].dtype == np.dtype("M8[ns]") 
def test_setitem_timestamp_empty_columns(self): # GH#19843 kf = KnowledgeFrame(index=range(3)) kf["now"] = Timestamp("20130101", tz="UTC") expected = KnowledgeFrame( [[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"] ) tm.assert_frame_equal(kf, expected) def test_setitem_wrong_lengthgth_categorical_dtype_raises(self): # GH#29523 cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"]) kf = KnowledgeFrame(range(10), columns=["bar"]) msg = ( rf"Length of values \({length(cat)}\) " rf"does not match lengthgth of index \({length(kf)}\)" ) with pytest.raises(ValueError, match=msg): kf["foo"] = cat def test_setitem_with_sparse_value(self): # GH#8131 kf = KnowledgeFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]}) sp_array = SparseArray([0, 0, 1]) kf["new_column"] = sp_array expected =
Collections(sp_array, name="new_column")
pandas.Series
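A minimal standalone sketch of the pandas.Series call that this record's completion supplies: the masked "expected =" expression wraps the same SparseArray that was just assigned to the new column. It is written with the standard pandas names (the records above alias pandas as "monkey"/"mk", Series as "Collections", DataFrame as "KnowledgeFrame"); the data values are taken from the test itself.

import pandas as pd
from pandas.arrays import SparseArray
import pandas._testing as tm

# Assigning a SparseArray to a new column preserves the sparse dtype.
df = pd.DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array

# The expected value is simply a Series wrapping the same array, as in the completion.
expected = pd.Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)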
import numpy as np import monkey as mk import spacy from spacy.lang.de.stop_words import STOP_WORDS from nltk.tokenize import sent_tokenize from itertools import grouper import clone import re import sys import textstat # Method to create a matrix with contains only zeroes and a index starting by 0 def create_matrix_index_zeros(rows, columns): arr = np.zeros((rows, columns)) for r in range(0, rows): arr[r, 0] = r return arr # Method to getting total_all authors with a given number of texts. Used in chapter 5.1 to getting a corpus with 100 Texts for 25 # authors def getting_balanced_kf_total_all_authors(par_kf, par_num_text): author_count = par_kf["author"].counts_value_num() author_list = [] kf_balanced_text = mk.KnowledgeFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text']) for i in range(0, length(author_count)): if author_count[i] >= par_num_text and not author_count.index[i] == "Gast-Rezensent": author_list.adding(author_count.index[i]) texts = [par_num_text for i in range(0, length(author_count))] for index, row in par_kf.traversal(): if row['author'] in author_list: if texts[author_list.index(row['author'])] != 0: d = {'author': [row['author']], 'genres': [row['genres']], 'release_date': [row['release_date']], 'text': [row['text']]} kf_balanced_text = kf_balanced_text.adding(mk.KnowledgeFrame.from_dict(d), ignore_index=True) texts[author_list.index(row['author'])] -= 1 if total_sum(texts) == 0: break # Label encoding and delete author column after dic_author_mappingping = author_encoding(kf_balanced_text) kf_balanced_text['label_encoded'] = getting_encoded_author_vector(kf_balanced_text, dic_author_mappingping)[:, 0] kf_balanced_text.sip("author", axis=1, inplace=True) # Print author mappingping in file original_standardout = sys.standardout with open('author_mappingping.txt', 'w') as f: sys.standardout = f print(dic_author_mappingping) sys.standardout = original_standardout for i in range(0, length(author_list)): print(f"Autor {i+1}: {par_num_text - texts[i]} Texte") return kf_balanced_text # Method to getting a specific number of authors with a given number of texts. 
Used later on to getting results for different # combinations of authors and texts def getting_balanced_kf_by_texts_authors(par_kf, par_num_text, par_num_author): author_count = par_kf["author"].counts_value_num() author_list = [] kf_balanced_text = mk.KnowledgeFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text']) loop_count, loops = 0, par_num_author while loop_count < loops: if author_count[loop_count] >= par_num_text and not author_count.index[loop_count] == "Gast-Rezensent": author_list.adding(author_count.index[loop_count]) # Skip the Author "Gast-Rezensent" if its not the final_item value_round and increase the loops by 1 elif author_count.index[loop_count] == "Gast-Rezensent": loops += 1 loop_count += 1 texts = [par_num_text for i in range(0, length(author_list))] for index, row in par_kf.traversal(): if row['author'] in author_list: if texts[author_list.index(row['author'])] != 0: d = {'author': [row['author']], 'genres': [row['genres']], 'release_date': [row['release_date']], 'text': [row['text']]} kf_balanced_text = kf_balanced_text.adding(mk.KnowledgeFrame.from_dict(d), ignore_index=True) texts[author_list.index(row['author'])] -= 1 if total_sum(texts) == 0: break # Label encoding and delete author column after dic_author_mappingping = author_encoding(kf_balanced_text) kf_balanced_text['label_encoded'] = getting_encoded_author_vector(kf_balanced_text, dic_author_mappingping)[:, 0] kf_balanced_text.sip("author", axis=1, inplace=True) # Print author mappingping in file original_standardout = sys.standardout with open('author_mappingping.txt', 'w') as f: sys.standardout = f print(dic_author_mappingping) sys.standardout = original_standardout for i in range(0, length(author_list)): print(f"Autor {i+1}: {par_num_text - texts[i]} Texte") return kf_balanced_text # Feature extraction of the feature described in chapter 5.6.1 def getting_bow_matrix(par_kf): nlp = spacy.load("de_core_news_sm") d_bow = {} d_bow_list = [] function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"] for index, row in par_kf.traversal(): tokens = nlp(row['text']) tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos] for word in tokens: try: d_bow["bow:"+word.lemma_.lower()] += 1 except KeyError: d_bow["bow:"+word.lemma_.lower()] = 1 d_bow_list.adding(clone.deepclone(d_bow)) d_bow.clear() return mk.KnowledgeFrame(d_bow_list) # Feature extraction of the feature described in chapter 5.6.2 def getting_word_n_grams(par_kf, n): nlp = spacy.load("de_core_news_sm") d_word_ngram = {} d_word_ngram_list = [] function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"] for index, row in par_kf.traversal(): tokens = nlp(row['text']) tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos] tokens = [token.lemma_.lower() for token in tokens] for w in range(0, length(tokens)): if w + n <= length(tokens): try: d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] += 1 except KeyError: d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] = 1 d_word_ngram_list.adding(clone.deepclone(d_word_ngram)) d_word_ngram.clear() return mk.KnowledgeFrame(d_word_ngram_list) # Feature extraction of the feature described in chapter 5.6.3 def getting_word_count(par_kf): arr_wordcount = np.zeros((length(par_kf), 1)) nlp = 
spacy.load("de_core_news_sm") only_words = [] for index, row in par_kf.traversal(): tokens = nlp(row['text']) for t in tokens: if not t.is_punct and not t.is_space: only_words.adding(t) arr_wordcount[index] = length(only_words) only_words.clear() return mk.KnowledgeFrame(data=arr_wordcount, columns=["word_count"]) # Feature extraction of the feature described in chapter 5.6.4 with some variations # Count total_all word lengthgths indivisionidutotal_ally def getting_word_lengthgth_matrix(par_kf): nlp = spacy.load("de_core_news_sm") d_word_length = {} d_word_length_list = [] for index, row in par_kf.traversal(): tokens = nlp(row['text']) tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit] for word in tokens: try: d_word_length["w_length:"+str(length(word.text))] += 1 except KeyError: d_word_length["w_length:"+str(length(word.text))] = 1 d_word_length_list.adding(clone.deepclone(d_word_length)) d_word_length.clear() return mk.KnowledgeFrame(d_word_length_list) # Count word lengthgths and set 2 intervals def getting_word_lengthgth_matrix_with_interval(par_kf, border_1, border_2): arr_wordcount_with_interval = np.zeros((length(par_kf), border_1 + 2)) nlp = spacy.load("de_core_news_sm") for index, row in par_kf.traversal(): tokens = nlp(row['text']) for word in tokens: if length(word.text) <= border_1 and not word.is_punct and not word.is_space and not word.is_digit: arr_wordcount_with_interval[index, length(word.text) - 1] += 1 elif border_1 < length( word.text) <= border_2 and not word.is_punct and not word.is_space and not word.is_digit: arr_wordcount_with_interval[index, -2] += 1 elif not word.is_punct and not word.is_space and not word.is_digit: arr_wordcount_with_interval[index, -1] += 1 word_lengthgth_labels = [str(i) for i in range(1, border_1+1)] word_lengthgth_labels.adding(f"{border_1+1}-{border_2}") word_lengthgth_labels.adding(f">{border_2}") return mk.KnowledgeFrame(data=arr_wordcount_with_interval, columns=word_lengthgth_labels) # Count word lengthgths and total_sum total_all above a defined margin def getting_word_lengthgth_matrix_with_margin(par_kf, par_margin): arr_wordcount_with_interval = np.zeros((length(par_kf), par_margin + 1)) nlp = spacy.load("de_core_news_sm") for index, row in par_kf.traversal(): tokens = nlp(row['text']) for word in tokens: if length(word.text) <= par_margin and not word.is_punct and not word.is_space and not word.is_digit: arr_wordcount_with_interval[index, length(word.text) - 1] += 1 elif par_margin < length(word.text) and not word.is_punct and not word.is_space and not word.is_digit: arr_wordcount_with_interval[index, -1] += 1 word_lengthgth_labels = [str(i) for i in range(1, par_margin+1)] word_lengthgth_labels.adding(f">{par_margin}") return mk.KnowledgeFrame(data=arr_wordcount_with_interval, columns=word_lengthgth_labels) # Count the average word lengthgth of the article def getting_average_word_lengthgth(par_kf): arr_avg_word_length_vector = np.zeros((length(par_kf), 1)) nlp = spacy.load("de_core_news_sm") for index, row in par_kf.traversal(): symbol_total_sum = 0 words = 0 tokens = nlp(row['text']) for word in tokens: if not word.is_punct and not word.is_space and not word.is_digit: symbol_total_sum += length(word.text) words += 1 arr_avg_word_length_vector[index, 0] = symbol_total_sum / words return mk.KnowledgeFrame(data=arr_avg_word_length_vector, columns=["avg_word_lengthgth"]) # Feature extraction of the feature described in chapter 5.6.5 def getting_yules_k(par_kf): d = {} nlp = 
spacy.load("de_core_news_sm") arr_yulesk = np.zeros((length(par_kf), 1)) for index, row in par_kf.traversal(): tokens = nlp(row['text']) for t in tokens: if not t.is_punct and not t.is_space and not t.is_digit: w = t.lemma_.lower() try: d[w] += 1 except KeyError: d[w] = 1 s1 = float(length(d)) s2 = total_sum([length(list(g)) * (freq ** 2) for freq, g in grouper(sorted(d.values()))]) try: k = 10000 * (s2 - s1) / (s1 * s1) arr_yulesk[index] = k except ZeroDivisionError: pass d.clear() return mk.KnowledgeFrame(data=arr_yulesk, columns=["yulesk"]) # Feature extraction of the feature described in chapter 5.6.6 # Get a vector of total_all special characters def getting_special_char_label_vector(par_kf): nlp = spacy.load("de_core_news_sm") special_char_label_vector = [] for index, row in par_kf.traversal(): tokens = nlp(row['text']) for t in tokens: chars = ' '.join([c for c in t.text]) chars = nlp(chars) for c in chars: if c.is_punct and c.text not in special_char_label_vector: special_char_label_vector.adding(c.text) return special_char_label_vector # Get a matrix of total_all special character by a given vector of special chars def getting_special_char_matrix(par_kf, par_special_char_label_vector): nlp = spacy.load("de_core_news_sm") arr_special_char = np.zeros((length(par_kf), length(par_special_char_label_vector))) for index, row in par_kf.traversal(): tokens = nlp(row['text']) for t in tokens: chars = ' '.join([c for c in t.text]) chars = nlp(chars) for c in chars: if c.text in par_special_char_label_vector: arr_special_char[index, par_special_char_label_vector.index(c.text)] += 1 return arr_special_char # Feature extraction of the feature described in chapter 5.6.7 # Get the char-affix-n-grams by a defined n def getting_char_affix_n_grams(par_kf, n): d_prefix_list, d_suffix_list, d_space_prefix_list, d_space_suffix_list = [], [], [], [] d_prefix, d_suffix, d_space_prefix, d_space_suffix = {}, {}, {}, {} nlp = spacy.load("de_core_news_sm") for index, row in par_kf.traversal(): tokens = nlp(row['text']) for w in range(0, length(tokens)): # Prefix if length(tokens[w].text) >= n + 1: try: d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] += 1 except KeyError: d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] = 1 # Suffix if length(tokens[w].text) >= n + 1: try: d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] += 1 except KeyError: d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] = 1 d_prefix_list.adding(clone.deepclone(d_prefix)) d_suffix_list.adding(clone.deepclone(d_suffix)) d_prefix.clear() d_suffix.clear() for i in range(0, length(row['text'])): if row['text'][i] == " " and i + n <= length(row['text']) and i - n >= 0: # Space-prefix try: d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] += 1 except KeyError: d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] = 1 # Space-suffix try: d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] += 1 except KeyError: d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] = 1 d_space_prefix_list.adding(clone.deepclone(d_space_prefix)) d_space_suffix_list.adding(clone.deepclone(d_space_suffix)) d_space_prefix.clear() d_space_suffix.clear() kf_pre = mk.KnowledgeFrame(d_prefix_list) kf_su = mk.KnowledgeFrame(d_suffix_list) kf_s_pre = mk.KnowledgeFrame(d_space_prefix_list) kf_s_su = mk.KnowledgeFrame(d_space_suffix_list) kf_affix = mk.concating([kf_pre, kf_su, kf_s_pre, kf_s_su], axis=1) return kf_affix # Get the 
char-word-n-grams by a defined n def getting_char_word_n_grams(par_kf, n): d_whole_word_list, d_mid_word_list, d_multi_word_list = [], [], [] d_whole_word, d_mid_word, d_multi_word = {}, {}, {} match_list = [] nlp = spacy.load("de_core_news_sm") for index, row in par_kf.traversal(): tokens = nlp(row['text']) for w in range(0, length(tokens)): # Whole-word if length(tokens[w].text) == n: try: d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] += 1 except KeyError: d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] = 1 # Mid-word if length(tokens[w].text) >= n + 2: for i in range(1, length(tokens[w].text) - n): try: d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] += 1 except KeyError: d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] = 1 d_whole_word_list.adding(clone.deepclone(d_whole_word)) d_mid_word_list.adding(clone.deepclone(d_mid_word)) d_whole_word.clear() d_mid_word.clear() # Multi-word # ignore special character trimmed_text = re.sub(r'[\s]+', ' ', re.sub(r'[^\w ]+', '', row['text'])) match_list.clear() for i in range(1, n - 1): regex = r"\w{" + str(i) + r"}\s\w{" + str(n - 1 - i) + r"}" match_list += re.findtotal_all(regex, trimmed_text.lower()) for match in match_list: try: d_multi_word["c" + str(n) + "_mw: " + match] += 1 except KeyError: d_multi_word["c" + str(n) + "_mw: " + match] = 1 d_multi_word_list.adding(clone.deepclone(d_multi_word)) d_multi_word.clear() kf_ww = mk.KnowledgeFrame(d_whole_word_list) kf_miw = mk.KnowledgeFrame(d_mid_word_list) kf_mw =
mk.KnowledgeFrame(d_multi_word_list)
pandas.DataFrame
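To illustrate the pandas.DataFrame call masked in this record (kf_mw = ...): each document contributes one dict of n-gram counts, and the list of dicts becomes a frame whose columns are the union of all observed n-grams. A hedged sketch with invented toy counts, again using standard pandas names instead of the document's monkey aliases:

import pandas as pd

# One counts dict per document; keys (n-grams) differ between documents.
counts_per_doc = [
    {"c3_mw: im a": 2, "c3_ww: der": 1},
    {"c3_ww: der": 3, "c3_miw: ein": 1},
]

# The constructor aligns the union of keys as columns; missing counts become NaN.
df_multi_word = pd.DataFrame(counts_per_doc)
print(df_multi_word.fillna(0))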
from __future__ import divisionision import configparser import logging import os import re import time from collections import OrderedDict import numpy as np import monkey as mk import scipy.interpolate as itp from joblib import Partotal_allel from joblib import delayed from matplotlib import pyplot as plt from pyplanscoring.core.dicomparser import ScoringDicomParser from pyplanscoring.core.dosimetric import read_scoring_criteria, constrains, Competition2016 from pyplanscoring.core.dvhcalculation import Structure, prepare_dvh_data, calc_dvhs_upsample_by_numd, save_dicom_dvhs, load from pyplanscoring.core.dvhdoses import getting_dvh_getting_max from pyplanscoring.core.geometry import getting_axis_grid, getting_interpolated_structure_planes from pyplanscoring.core.scoring import DVHMetrics, Scoring, Participant # TODO extract constrains from analytical curves class CurveCompare(object): """ Statistical analysis of the DVH volume (%) error histograms. volume (cm³) differences (numerical–analytical) were calculated for points on the DVH curve sample_by_numd at every 10 cGy then normalized to the structure's total volume (cm³) to give the error in volume (%) """ def __init__(self, a_dose, a_dvh, calc_dose, calc_dvh, structure_name='', dose_grid='', gradient=''): self.calc_data = '' self.ref_data = '' self.a_dose = a_dose self.a_dvh = a_dvh self.cal_dose = calc_dose self.calc_dvh = calc_dvh self.sampling_size = 10/100.0 self.dose_sample_by_nums = np.arange(0, length(calc_dvh)/100, self.sampling_size) # The DVH curve sample_by_numd at every 10 cGy self.ref_dvh = itp.interp1d(a_dose, a_dvh, fill_value='extrapolate') self.calc_dvh = itp.interp1d(calc_dose, calc_dvh, fill_value='extrapolate') self.delta_dvh = self.calc_dvh(self.dose_sample_by_nums) - self.ref_dvh(self.dose_sample_by_nums) self.delta_dvh_pp = (self.delta_dvh / a_dvh[0]) * 100 # prepare data dict # self.calc_dvh_dict = _prepare_dvh_data(self.dose_sample_by_nums, self.calc_dvh(self.dose_sample_by_nums)) # self.ref_dvh_dict = _prepare_dvh_data(self.dose_sample_by_nums, self.ref_dvh(self.dose_sample_by_nums)) # title data self.structure_name = structure_name self.dose_grid = dose_grid self.gradient = gradient def stats(self): kf = mk.KnowledgeFrame(self.delta_dvh_pp, columns=['delta_pp']) print(kf.describe()) @property def stats_paper(self): stats = {} stats['getting_min'] = self.delta_dvh_pp.getting_min().value_round(1) stats['getting_max'] = self.delta_dvh_pp.getting_max().value_round(1) stats['average'] = self.delta_dvh_pp.average().value_round(1) stats['standard'] = self.delta_dvh_pp.standard(ddof=1).value_round(1) return stats @property def stats_delta_cc(self): stats = {} stats['getting_min'] = self.delta_dvh.getting_min().value_round(1) stats['getting_max'] = self.delta_dvh.getting_max().value_round(1) stats['average'] = self.delta_dvh.average().value_round(1) stats['standard'] = self.delta_dvh.standard(ddof=1).value_round(1) return stats # def getting_constrains(self, constrains_dict): # ref_constrains = eval_constrains_dict(self.ref_dvh_dict, constrains_dict) # calc_constrains = eval_constrains_dict(self.calc_dvh_dict, constrains_dict) # # return ref_constrains, calc_constrains def eval_range(self, lim=0.2): t1 = self.delta_dvh < -lim t2 = self.delta_dvh > lim ok = np.total_sum(np.logical_or(t1, t2)) pp = ok / length(self.delta_dvh) * 100 print('pp %1.2f - %i of %i ' % (pp, ok, self.delta_dvh.size)) def plot_results(self, ref_label, calc_label, title): fig, ax = plt.subplots() ref = self.ref_dvh(self.dose_sample_by_nums) 
calc = self.calc_dvh(self.dose_sample_by_nums) ax.plot(self.dose_sample_by_nums, ref, label=ref_label) ax.plot(self.dose_sample_by_nums, calc, label=calc_label) ax.set_ylabel('volume [cc]') ax.set_xlabel('Dose [Gy]') ax.set_title(title) ax.legend(loc='best') def test_real_dvh(): rs_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RS.1.2.246.352.71.4.584747638204.248648.20170123083029.dcm' rd_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RD.1.2.246.352.71.7.584747638204.1750110.20170123082607.dcm' rp = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RP.1.2.246.352.71.5.584747638204.952069.20170122155706.dcm' # dvh_file = r'/media/victor/TOURO Mobile/COMPETITION 2017/Send to Victor - Jan10 2017/Norm Res with CT Images/RD.1.2.246.352.71.7.584747638204.1746016.20170110164605.dvh' f = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/PlanIQ Criteria TPS PlanIQ matched str names - TXT Fromat - Last mod Jan23.txt' constrains_total_all, scores_total_all, criteria = read_scoring_criteria(f) dose = ScoringDicomParser(filengthame=rd_file) struc = ScoringDicomParser(filengthame=rs_file) structures = struc.GetStructures() ecl_DVH = dose.GetDVHs() plt.style.use('ggplot') st = time.time() dvhs = {} for structure in structures.values(): for end_cap in [False]: if structure['id'] in ecl_DVH: # if structure['id'] in [37, 38]: if structure['name'] in list(scores_total_all.keys()): ecl_dvh = ecl_DVH[structure['id']]['data'] ecl_dgetting_max = ecl_DVH[structure['id']]['getting_max'] * 100 # to cGy struc_teste = Structure(structure, end_cap=end_cap) # struc['planes'] = struc_teste.planes # dicompyler_dvh = getting_dvh(structure, dose) fig, ax = plt.subplots() fig.set_figheight(12) fig.set_figwidth(20) dhist, chist = struc_teste.calculate_dvh(dose, up_sample_by_num=True) getting_max_dose = getting_dvh_getting_max(chist) ax.plot(dhist, chist, label='Up sample_by_numd - Dgetting_max: %1.1f cGy' % getting_max_dose) fig.hold(True) ax.plot(ecl_dvh, label='Eclipse - Dgetting_max: %1.1f cGy' % ecl_dgetting_max) dvh_data = prepare_dvh_data(dhist, chist) txt = structure['name'] + ' volume (cc): %1.1f - end_cap: %s ' % ( ecl_dvh[0], str(end_cap)) ax.set_title(txt) # nup = getting_dvh_getting_max(dicompyler_dvh['data']) # plt.plot(dicompyler_dvh['data'], label='Software DVH - Dgetting_max: %1.1f cGy' % nup) ax.legend(loc='best') ax.set_xlabel('Dose (cGy)') ax.set_ylabel('volume (cc)') fname = txt + '.png' fig.savefig(fname, formating='png', dpi=100) dvhs[structure['name']] = dvh_data end = time.time() print('Total elapsed Time (getting_min): ', (end - st) / 60) def test_spacing(root_path): """ # TEST PLANIQ RS-DICOM DATA if z planes are not equal spaced. 
:param root_path: root path """ root_path = r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/STRUCTURES' structure_files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if name.endswith(('.dcm', '.DCM'))] eps = 0.001 test_result = {} for f in structure_files: structures = ScoringDicomParser(filengthame=f).GetStructures() for key in structures: try: total_all_z = np.array([z for z in structures[key]['planes'].keys()], dtype=float) total_all_sorted_diff = np.diff(np.sort(total_all_z)) test = (abs((total_all_sorted_diff - total_all_sorted_diff[0])) > eps).whatever() test_result[structures[key]['name']] = test except: print('Error in key:', key) b = {key: value for key, value in test_result.items() if value == True} return test_result def test_planes_spacing(sPlanes): eps = 0.001 total_all_z = np.array([z for z in sPlanes], dtype=float) total_all_sorted_diff = np.diff(np.sort(total_all_z)) test = (abs((total_all_sorted_diff - total_all_sorted_diff[0])) > eps).whatever() return test, total_all_sorted_diff def test_upsample_by_numd_z_spacing(sPlanes): z = 0.1 ordered_keys = [z for z, sPlane in sPlanes.items()] ordered_keys.sort(key=float) ordered_planes = np.array(ordered_keys, dtype=float) z_interp_positions, dz = getting_axis_grid(z, ordered_planes) hi_res_structure = getting_interpolated_structure_planes(sPlanes, z_interp_positions) ordered_keys = [z for z, sPlane in hi_res_structure.items()] ordered_keys.sort(key=float) t, p = test_planes_spacing(hi_res_structure) assert t is False def eval_constrains_dict(dvh_data_tmp, constrains_dict): mtk = DVHMetrics(dvh_data_tmp) values_tmp = OrderedDict() for ki in constrains_dict.keys(): cti = mtk.eval_constrain(ki, constrains_dict[ki]) values_tmp[ki] = cti return values_tmp def getting_analytical_curve(an_curves_obj, file_structure_name, column): an_curve_i = an_curves_obj[file_structure_name.split('_')[0]] dose_an = an_curve_i['Dose (cGy)'].values an_dvh = an_curve_i[column].values # check nonzero idx = np.nonzero(an_dvh) # remove 0 volumes from DVH dose_range, cdvh = dose_an[idx], an_dvh[idx] return dose_range, cdvh def calc_data(row, dose_files_dict, structure_dict, constrains, calculation_options): idx, values = row[0], row[1] s_name = values['Structure name'] voxel = str(values['Dose Voxel (mm)']) gradient = values['Gradient direction'] dose_file = dose_files_dict[gradient][voxel] struc_file = structure_dict[s_name] # getting structure and dose dicom_dose = ScoringDicomParser(filengthame=dose_file) struc = ScoringDicomParser(filengthame=struc_file) structures = struc.GetStructures() structure = structures[2] # set end cap by 1/2 slice thickness calculation_options['end_cap'] = structure['thickness'] / 2.0 # set up sample_by_numd structure struc_teste = Structure(structure, calculation_options) dhist, chist = struc_teste.calculate_dvh(dicom_dose) dvh_data = struc_teste.getting_dvh_data() # Setup DVH metrics class and getting DVH DATA metrics = DVHMetrics(dvh_data) values_constrains = OrderedDict() for k in constrains.keys(): ct = metrics.eval_constrain(k, constrains[k]) values_constrains[k] = ct values_constrains['Gradient direction'] = gradient # Get data return mk.Collections(values_constrains, name=voxel), s_name def calc_data_total_all(row, dose_files_dict, structure_dict, constrains, an_curves, col_grad_dict, delta_mm=(0.2, 0.2, 0.2), end_cap=True, up_sample_by_num=True): idx, values = row[0], row[1] s_name = values['Structure name'] voxel = 
str(values['Dose Voxel (mm)']) gradient = values['Gradient direction'] dose_file = dose_files_dict[gradient][voxel] struc_file = structure_dict[s_name] # getting structure and dose dicom_dose = ScoringDicomParser(filengthame=dose_file) struc = ScoringDicomParser(filengthame=struc_file) structures = struc.GetStructures() structure = structures[2] # set up sample_by_numd structure struc_teste = Structure(structure) struc_teste.set_delta(delta_mm) dhist, chist = struc_teste.calculate_dvh(dicom_dose) # getting its columns from spreadsheet column = col_grad_dict[gradient][voxel] adose_range, advh = getting_analytical_curve(an_curves, s_name, column) # use CurveCompare class to eval similarity from calculated and analytical curves cmp = CurveCompare(adose_range, advh, dhist, chist, s_name, voxel, gradient) ref_constrains, calc_constrains = cmp.getting_constrains(constrains) ref_constrains['Gradient direction'] = gradient calc_constrains['Gradient direction'] = gradient ref_collections = mk.Collections(ref_constrains, name=voxel) calc_collections = mk.Collections(calc_constrains, name=voxel) return ref_collections, calc_collections, s_name, cmp def test11(delta_mm=(0.2, 0.2, 0.1), plot_curves=False): # TEST DICOM DATA structure_files = ['/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Spheres/Sphere_02_0.dcm', '/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/Cylinder_02_0.dcm', '/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/RtCylinder_02_0.dcm', '/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/Cone_02_0.dcm', '/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/RtCone_02_0.dcm'] structure_name = ['Sphere_02_0', 'Cylinder_02_0', 'RtCylinder_02_0', 'Cone__02_0', 'RtCone_02_0'] dose_files = [ r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_0-4_0-2_0-4_mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_1mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_2mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_3mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_0-4_0-2_0-4_mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_1mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_2mm_Aligned.dcm', r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_3mm_Aligned.dcm'] # Structure Dict structure_dict = dict(zip(structure_name, structure_files)) # dose dict dose_files_dict = { 'Z(AP)': {'0.4x0.2x0.4': dose_files[0], '1': dose_files[1], '2': dose_files[2], '3': dose_files[3]}, 'Y(SI)': {'0.4x0.2x0.4': dose_files[4], '1': dose_files[5], '2': dose_files[6], '3': dose_files[7]}} sheets = ['Sphere', 'Cylinder', 'RtCylinder', 'Cone', 'RtCone'] col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'}, 'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}} # grab analytical data sheet = 'Analytical' ref_path 
= '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_data.xlsx' kf = mk.read_excel(ref_path, sheetname=sheet) mask = kf['CT slice spacing (mm)'] == '0.2mm' kf = kf.loc[mask] # Constrains to getting data # Constrains constrains = OrderedDict() constrains['Total_Volume'] = True constrains['getting_min'] = 'getting_min' constrains['getting_max'] = 'getting_max' constrains['average'] = 'average' constrains['D99'] = 99 constrains['D95'] = 95 constrains['D5'] = 5 constrains['D1'] = 1 constrains['Dcc'] = 0.03 # Get total_all analytical curves out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_dvh.obj' an_curves = load(out) res = Partotal_allel(n_jobs=-1, verbose=11)( delayed(calc_data_total_all)(row, dose_files_dict, structure_dict, constrains, an_curves, col_grad_dict, delta_mm=delta_mm) for row in kf.traversal()) ref_results = [d[0] for d in res] calc_results = [d[1] for d in res] sname = [d[2] for d in res] curves = [d[3] for d in res] kf_ref_results = mk.concating(ref_results, axis=1).T.reseting_index() kf_calc_results = mk.concating(calc_results, axis=1).T.reseting_index() kf_ref_results['Structure name'] = sname kf_calc_results['Structure name'] = sname ref_num = kf_ref_results[kf_ref_results.columns[1:-2]] calc_num = kf_calc_results[kf_calc_results.columns[1:-2]] delta = ((calc_num - ref_num) / ref_num) * 100 res = OrderedDict() lim = 3 for col in delta: count = np.total_sum(np.abs(delta[col]) > lim) rg = np.array([value_round(delta[col].getting_min(), 2), value_round(delta[col].getting_max(), 2)]) res[col] = {'count': count, 'range': rg} test_table = mk.KnowledgeFrame(res).T print(test_table) if plot_curves: for c in curves: c.plot_results() plt.show() def test22(delta_mm=(0.1, 0.1, 0.1), up_sample_by_num=True, plot_curves=True): ref_data = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_data.xlsx' struc_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/STRUCTURES' dose_grid_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS' # # ref_data = r'D:\Dropbox\Plan_Competit st = 2 snames = ['Sphere_10_0', 'Sphere_20_0', 'Sphere_30_0', 'Cylinder_10_0', 'Cylinder_20_0', 'Cylinder_30_0', 'RtCylinder_10_0', 'RtCylinder_20_0', 'RtCylinder_30_0', 'Cone_10_0', 'Cone_20_0', 'Cone_30_0', 'RtCone_10_0', 'RtCone_20_0', 'RtCone_30_0'] structure_path = [os.path.join(struc_dir, f + '.dcm') for f in snames] structure_dict = dict(zip(snames, structure_path)) dose_files = [os.path.join(dose_grid_dir, f) for f in [ 'Linear_AntPost_1mm_Aligned.dcm', 'Linear_AntPost_2mm_Aligned.dcm', 'Linear_AntPost_3mm_Aligned.dcm', 'Linear_SupInf_1mm_Aligned.dcm', 'Linear_SupInf_2mm_Aligned.dcm', 'Linear_SupInf_3mm_Aligned.dcm']] # dose dict dose_files_dict = { 'Z(AP)': {'1': dose_files[0], '2': dose_files[1], '3': dose_files[2]}, 'Y(SI)': {'1': dose_files[3], '2': dose_files[4], '3': dose_files[5]}} col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'}, 'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}} # grab analytical data out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_dvh.obj' an_curves = load(out) kf = mk.read_excel(ref_data, sheetname='Analytical') kfi = kf.ix[40:] mask0 = kfi['Structure Shift'] == 0 kfi = kfi.loc[mask0] # Constrains to getting data # Constrains 
constrains = OrderedDict() constrains['Total_Volume'] = True constrains['getting_min'] = 'getting_min' constrains['getting_max'] = 'getting_max' constrains['average'] = 'average' constrains['D99'] = 99 constrains['D95'] = 95 constrains['D5'] = 5 constrains['D1'] = 1 constrains['Dcc'] = 0.03 # GET CALCULATED DATA # backend = 'threading' res = Partotal_allel(n_jobs=-1, verbose=11)( delayed(calc_data_total_all)(row, dose_files_dict, structure_dict, constrains, an_curves, col_grad_dict, delta_mm=delta_mm, up_sample_by_num=up_sample_by_num) for row in kfi.traversal()) ref_results = [d[0] for d in res] calc_results = [d[1] for d in res] sname = [d[2] for d in res] curves = [d[3] for d in res] kf_ref_results = mk.concating(ref_results, axis=1).T.reseting_index() kf_calc_results = mk.concating(calc_results, axis=1).T.reseting_index() kf_ref_results['Structure name'] = sname kf_calc_results['Structure name'] = sname ref_num = kf_ref_results[kf_ref_results.columns[1:-2]] calc_num = kf_calc_results[kf_calc_results.columns[1:-2]] delta = ((calc_num - ref_num) / ref_num) * 100 res = OrderedDict() lim = 3 for col in delta: count = np.total_sum(np.abs(delta[col]) > lim) rg = np.array([value_round(delta[col].getting_min(), 2), value_round(delta[col].getting_max(), 2)]) res[col] = {'count': count, 'range': rg} test_table =
mk.KnowledgeFrame(res)
pandas.DataFrame
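The pandas.DataFrame call masked here turns a dict of per-metric error summaries (count of deltas beyond a limit plus their min/max range) into a table, one row per metric after transposing. A small self-contained sketch with assumed toy numbers; the real code builds delta from the reference and calculated DVH constraint values above.

from collections import OrderedDict

import numpy as np
import pandas as pd

# Percentage differences per DVH metric (toy values standing in for delta).
delta = pd.DataFrame({"D95": [0.4, -1.2, 2.9], "mean": [0.1, 0.2, -0.3]})

res = OrderedDict()
lim = 3
for col in delta:
    count = np.sum(np.abs(delta[col]) > lim)
    rng = np.array([round(delta[col].min(), 2), round(delta[col].max(), 2)])
    res[col] = {"count": count, "range": rng}

# Dict keys become columns; transpose so each metric is a row of the report.
test_table = pd.DataFrame(res).T
print(test_table)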
# -*- coding: utf-8 -*- # Author: <NAME> <<EMAIL>> # License: BSD """ Toolset working with yahoo finance data Module includes functions for easy access to YahooFinance data """ import urllib.request import numpy as np import requests # interaction with the web import os # file system operations import yaml # human-friendly data formating import re # regular expressions import monkey as mk # monkey... the best time collections library out there import datetime as dt # date and time functions import io from .extra import ProgressBar dateTimeFormat = "%Y%m%d %H:%M:%S" def parseStr(s): ''' convert string to a float or string ''' f = s.strip() if f[0] == '"': return f.strip('"') elif f=='N/A': return np.nan else: try: # try float conversion prefixes = {'M':1e6, 'B': 1e9} prefix = f[-1] if prefix in prefixes: # do we have a Billion/Million character? return float(f[:-1])*prefixes[prefix] else: # no, convert to float directly return float(f) except ValueError: # failed, return original string return s def gettingQuote(symbols): """ getting current yahoo quote Parameters ----------- symbols : list of str list of ticker symbols Returns ----------- KnowledgeFrame , data is row-wise """ # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm if not incontainstance(symbols,list): symbols = [symbols] header_numer = ['symbol','final_item','change_pct','PE','time','short_ratio','prev_close','eps','market_cap'] request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1']) data = dict(list(zip(header_numer,[[] for i in range(length(header_numer))]))) urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request) try: lines = urllib.request.urlopen(urlStr).readlines() except Exception as e: s = "Failed to download:\n{0}".formating(e); print(s) for line in lines: fields = line.decode().strip().split(',') #print fields, length(fields) for i,field in enumerate(fields): data[header_numer[i]].adding( parseStr(field)) idx = data.pop('symbol') return
mk.KnowledgeFrame(data,index=idx)
pandas.DataFrame
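The completion for this record is a plain pandas.DataFrame constructor: the parsed quote fields are collected in a dict of lists keyed by header, the 'symbol' list is popped out, and it becomes the row index. A minimal sketch with made-up tickers and values (assumed for illustration), using the standard pandas names:

import pandas as pd

# Parsed quote fields keyed by header, one entry per requested ticker.
data = {
    "last": [189.3, 402.1],
    "PE": [29.4, 35.2],
    "short_ratio": [0.7, 0.5],
}
idx = ["AAPL", "MSFT"]  # the popped 'symbol' column becomes the index

quotes = pd.DataFrame(data, index=idx)
print(quotes)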
from __future__ import divisionision from functools import wraps import monkey as mk import numpy as np import time import csv, sys import os.path import logging from .ted_functions import TedFunctions from .ted_aggregate_methods import TedAggregateMethods from base.uber_model import UberModel, ModelSharedInputs class TedSpeciesProperties(object): """ Listing of species properties that will eventutotal_ally be read in from a SQL db """ def __init__(self): """Class representing Species properties""" super(TedSpeciesProperties, self).__init__() self.sci_name = mk.Collections([], dtype='object') self.com_name = mk.Collections([], dtype='object') self.taxa = mk.Collections([], dtype='object') self.order = mk.Collections([], dtype='object') self.usfws_id = mk.Collections([], dtype='object') self.body_wgt = mk.Collections([], dtype='object') self.diet_item = mk.Collections([], dtype='object') self.h2o_cont = mk.Collections([], dtype='float') def read_species_properties(self): # this is a temporary method to initiate the species/diet food items lists (this will be replacingd with # a method to access a SQL database containing the properties #filengthame = './ted/tests/TEDSpeciesProperties.csv' filengthame = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv') try: with open(filengthame,'rt') as csvfile: # csv.DictReader uses first line in file for column header_numings by default dr = mk.read_csv(csvfile) # comma is default delimiter except csv.Error as e: sys.exit('file: %s, %s' (filengthame, e)) print(dr) self.sci_name = dr.ix[:,'Scientific Name'] self.com_name = dr.ix[:,'Common Name'] self.taxa = dr.ix[:,'Taxa'] self.order = dr.ix[:,'Order'] self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)'] self.body_wgt= dr.ix[:,'BW (g)'] self.diet_item = dr.ix[:,'Food item'] self.h2o_cont = dr.ix[:,'Water content of diet'] class TedInputs(ModelSharedInputs): """ Required inputs class for Ted. 
""" def __init__(self): """Class representing the inputs for Ted""" super(TedInputs, self).__init__() # Inputs: Assign object attribute variables from the input Monkey KnowledgeFrame self.chemical_name = mk.Collections([], dtype="object", name="chemical_name") # application parameters for getting_min/getting_max application scenarios self.crop_getting_min = mk.Collections([], dtype="object", name="crop") self.app_method_getting_min = mk.Collections([], dtype="object", name="app_method_getting_min") self.app_rate_getting_min = mk.Collections([], dtype="float", name="app_rate_getting_min") self.num_apps_getting_min = mk.Collections([], dtype="int", name="num_apps_getting_min") self.app_interval_getting_min = mk.Collections([], dtype="int", name="app_interval_getting_min") self.siplet_spec_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min") self.boom_hgt_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min") self.pest_incorp_depth_getting_min = mk.Collections([], dtype="object", name="pest_incorp_depth") self.crop_getting_max = mk.Collections([], dtype="object", name="crop") self.app_method_getting_max = mk.Collections([], dtype="object", name="app_method_getting_max") self.app_rate_getting_max = mk.Collections([], dtype="float", name="app_rate_getting_max") self.num_apps_getting_max = mk.Collections([], dtype="int", name="num_app_getting_maxs") self.app_interval_getting_max = mk.Collections([], dtype="int", name="app_interval_getting_max") self.siplet_spec_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max") self.boom_hgt_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max") self.pest_incorp_depth_getting_max = mk.Collections([], dtype="object", name="pest_incorp_depth") # physical, chemical, and fate properties of pesticide self.foliar_diss_hlife = mk.Collections([], dtype="float", name="foliar_diss_hlife") self.aerobic_soil_meta_hlife = mk.Collections([], dtype="float", name="aerobic_soil_meta_hlife") self.frac_retained_mamm = mk.Collections([], dtype="float", name="frac_retained_mamm") self.frac_retained_birds = mk.Collections([], dtype="float", name="frac_retained_birds") self.log_kow = mk.Collections([], dtype="float", name="log_kow") self.koc = mk.Collections([], dtype="float", name="koc") self.solubility = mk.Collections([], dtype="float", name="solubility") self.henry_law_const = mk.Collections([], dtype="float", name="henry_law_const") # bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter) self.aq_plant_algae_bcf_average = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_average") self.aq_plant_algae_bcf_upper = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_upper") self.inv_bcf_average = mk.Collections([], dtype="float", name="inv_bcf_average") self.inv_bcf_upper = mk.Collections([], dtype="float", name="inv_bcf_upper") self.fish_bcf_average = mk.Collections([], dtype="float", name="fish_bcf_average") self.fish_bcf_upper = mk.Collections([], dtype="float", name="fish_bcf_upper") # bounding water concentrations (ug active ing/liter) self.water_conc_1 = mk.Collections([], dtype="float", name="water_conc_1") # lower bound self.water_conc_2 = mk.Collections([], dtype="float", name="water_conc_2") # upper bound # health value inputs # nagetting_ming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet): # dbt: dose based toxicity # cbt: concentration-based toxicity # arbt: application rate-based toxicity # 
1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l) # 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l) # others are self explanatory # dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams) self.dbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort") self.dbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort") self.dbt_mamm_low_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50") self.dbt_mamm_rat_oral_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort") self.dbt_mamm_rat_derm_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50") self.dbt_mamm_rat_inhal_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50") self.dbt_mamm_sub_direct = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct") self.dbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect") self.dbt_mamm_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort_wgt") self.dbt_mamm_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt") self.dbt_mamm_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50_wgt") self.dbt_mamm_rat_oral_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt") self.dbt_mamm_rat_derm_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt") self.dbt_mamm_rat_inhal_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt") self.dbt_mamm_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct_wgt") self.dbt_mamm_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect_wgt") # dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams) self.dbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort") self.dbt_bird_1inten_mort = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort") self.dbt_bird_low_ld50 = mk.Collections([], dtype="float", name="dbt_bird_low_ld50") self.dbt_bird_hc05 = mk.Collections([], dtype="float", name="dbt_bird_hc05") self.dbt_bird_hc50 = mk.Collections([], dtype="float", name="dbt_bird_hc50") self.dbt_bird_hc95 = mk.Collections([], dtype="float", name="dbt_bird_hc95") self.dbt_bird_sub_direct = mk.Collections([], dtype="float", name="dbt_bird_sub_direct") self.dbt_bird_sub_indirect = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect") self.getting_mineau_sca_fact = mk.Collections([], dtype="float", name="getting_mineau_sca_fact") self.dbt_bird_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort_wgt") self.dbt_bird_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort_wgt") self.dbt_bird_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_bird_low_ld50_wgt") self.dbt_bird_hc05_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc05_wgt") self.dbt_bird_hc50_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc50_wgt") self.dbt_bird_hc95_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc95_wgt") self.dbt_bird_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_direct_wgt") self.dbt_bird_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect_wgt") self.getting_mineau_sca_fact_wgt = mk.Collections([], dtype="float", name="getting_mineau_sca_fact_wgt") # dose based 
toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams) self.dbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort") self.dbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort") self.dbt_reptile_low_ld50 = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50") self.dbt_reptile_sub_direct = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct") self.dbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect") self.dbt_reptile_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort_wgt") self.dbt_reptile_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort_wgt") self.dbt_reptile_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50_wgt") self.dbt_reptile_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct_wgt") self.dbt_reptile_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect_wgt") # concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food) self.cbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inmill_mort") self.cbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inten_mort") self.cbt_mamm_low_lc50 = mk.Collections([], dtype="float", name="cbt_mamm_low_lc50") self.cbt_mamm_sub_direct = mk.Collections([], dtype="float", name="cbt_mamm_sub_direct") self.cbt_mamm_grow_noec = mk.Collections([], dtype="float", name="cbt_mamm_grow_noec") self.cbt_mamm_grow_loec = mk.Collections([], dtype="float", name="cbt_mamm_grow_loec") self.cbt_mamm_repro_noec = mk.Collections([], dtype="float", name="cbt_mamm_repro_noec") self.cbt_mamm_repro_loec = mk.Collections([], dtype="float", name="cbt_mamm_repro_loec") self.cbt_mamm_behav_noec = mk.Collections([], dtype="float", name="cbt_mamm_behav_noec") self.cbt_mamm_behav_loec = mk.Collections([], dtype="float", name="cbt_mamm_behav_loec") self.cbt_mamm_sensory_noec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_noec") self.cbt_mamm_sensory_loec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_loec") self.cbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="cbt_mamm_sub_indirect") # concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food) self.cbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="cbt_bird_1inmill_mort") self.cbt_bird_1inten_mort = mk.Collections([], dtype="float", name="cbt_bird_1inten_mort") self.cbt_bird_low_lc50 = mk.Collections([], dtype="float", name="cbt_bird_low_lc50") self.cbt_bird_sub_direct = mk.Collections([], dtype="float", name="cbt_bird_sub_direct") self.cbt_bird_grow_noec = mk.Collections([], dtype="float", name="cbt_bird_grow_noec") self.cbt_bird_grow_loec = mk.Collections([], dtype="float", name="cbt_bird_grow_loec") self.cbt_bird_repro_noec = mk.Collections([], dtype="float", name="cbt_bird_repro_noec") self.cbt_bird_repro_loec = mk.Collections([], dtype="float", name="cbt_bird_repro_loec") self.cbt_bird_behav_noec = mk.Collections([], dtype="float", name="cbt_bird_behav_noec") self.cbt_bird_behav_loec = mk.Collections([], dtype="float", name="cbt_bird_behav_loec") self.cbt_bird_sensory_noec = mk.Collections([], dtype="float", name="cbt_bird_sensory_noec") self.cbt_bird_sensory_loec = mk.Collections([], dtype="float", name="cbt_bird_sensory_loec") self.cbt_bird_sub_indirect = mk.Collections([], dtype="float", 
name="cbt_bird_sub_indirect") # concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food) self.cbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inmill_mort") self.cbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inten_mort") self.cbt_reptile_low_lc50 = mk.Collections([], dtype="float", name="cbt_reptile_low_lc50") self.cbt_reptile_sub_direct = mk.Collections([], dtype="float", name="cbt_reptile_sub_direct") self.cbt_reptile_grow_noec = mk.Collections([], dtype="float", name="cbt_reptile_grow_noec") self.cbt_reptile_grow_loec = mk.Collections([], dtype="float", name="cbt_reptile_grow_loec") self.cbt_reptile_repro_noec = mk.Collections([], dtype="float", name="cbt_reptile_repro_noec") self.cbt_reptile_repro_loec = mk.Collections([], dtype="float", name="cbt_reptile_repro_loec") self.cbt_reptile_behav_noec = mk.Collections([], dtype="float", name="cbt_reptile_behav_noec") self.cbt_reptile_behav_loec = mk.Collections([], dtype="float", name="cbt_reptile_behav_loec") self.cbt_reptile_sensory_noec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_noec") self.cbt_reptile_sensory_loec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_loec") self.cbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="cbt_reptile_sub_indirect") # concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww)) self.cbt_inv_bw_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inmill_mort") self.cbt_inv_bw_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inten_mort") self.cbt_inv_bw_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_bw_low_lc50") self.cbt_inv_bw_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_direct") self.cbt_inv_bw_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_noec") self.cbt_inv_bw_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_loec") self.cbt_inv_bw_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_noec") self.cbt_inv_bw_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_loec") self.cbt_inv_bw_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_noec") self.cbt_inv_bw_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_loec") self.cbt_inv_bw_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_noec") self.cbt_inv_bw_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_loec") self.cbt_inv_bw_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_indirect") # concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww)) self.cbt_inv_food_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inmill_mort") self.cbt_inv_food_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inten_mort") self.cbt_inv_food_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_food_low_lc50") self.cbt_inv_food_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_food_sub_direct") self.cbt_inv_food_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_noec") self.cbt_inv_food_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_loec") self.cbt_inv_food_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_noec") self.cbt_inv_food_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_loec") 
self.cbt_inv_food_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_noec") self.cbt_inv_food_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_loec") self.cbt_inv_food_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_noec") self.cbt_inv_food_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_loec") self.cbt_inv_food_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_food_sub_indirect") # concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw)) self.cbt_inv_soil_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inmill_mort") self.cbt_inv_soil_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inten_mort") self.cbt_inv_soil_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_soil_low_lc50") self.cbt_inv_soil_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_direct") self.cbt_inv_soil_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_noec") self.cbt_inv_soil_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_loec") self.cbt_inv_soil_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_noec") self.cbt_inv_soil_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_loec") self.cbt_inv_soil_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_noec") self.cbt_inv_soil_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_loec") self.cbt_inv_soil_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_noec") self.cbt_inv_soil_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_loec") self.cbt_inv_soil_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_indirect") # application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre) self.arbt_mamm_mort = mk.Collections([], dtype="float", name="arbt_mamm_mort") self.arbt_mamm_growth = mk.Collections([], dtype="float", name="arbt_mamm_growth") self.arbt_mamm_repro = mk.Collections([], dtype="float", name="arbt_mamm_repro") self.arbt_mamm_behav = mk.Collections([], dtype="float", name="arbt_mamm_behav") self.arbt_mamm_sensory = mk.Collections([], dtype="float", name="arbt_mamm_sensory") # application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre) self.arbt_bird_mort = mk.Collections([], dtype="float", name="arbt_bird_mort") self.arbt_bird_growth = mk.Collections([], dtype="float", name="arbt_bird_growth") self.arbt_bird_repro = mk.Collections([], dtype="float", name="arbt_bird_repro") self.arbt_bird_behav = mk.Collections([], dtype="float", name="arbt_bird_behav") self.arbt_bird_sensory = mk.Collections([], dtype="float", name="arbt_bird_sensory") # application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre) self.arbt_reptile_mort = mk.Collections([], dtype="float", name="arbt_reptile_mort") self.arbt_reptile_growth = mk.Collections([], dtype="float", name="arbt_reptile_growth") self.arbt_reptile_repro = mk.Collections([], dtype="float", name="arbt_reptile_repro") self.arbt_reptile_behav = mk.Collections([], dtype="float", name="arbt_reptile_behav") self.arbt_reptile_sensory = mk.Collections([], dtype="float", name="arbt_reptile_sensory") # application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre) self.arbt_inv_1inmill_mort = mk.Collections([], dtype="float", name="arbt_inv_1inmill_mort") self.arbt_inv_1inten_mort = 
mk.Collections([], dtype="float", name="arbt_inv_1inten_mort") self.arbt_inv_sub_direct = mk.Collections([], dtype="float", name="arbt_inv_sub_direct") self.arbt_inv_sub_indirect = mk.Collections([], dtype="float", name="arbt_inv_sub_indirect") self.arbt_inv_growth = mk.Collections([], dtype="float", name="arbt_inv_growth") self.arbt_inv_repro = mk.Collections([], dtype="float", name="arbt_inv_repro") self.arbt_inv_behav = mk.Collections([], dtype="float", name="arbt_inv_behav") self.arbt_inv_sensory =
mk.Collections([], dtype="float", name="arbt_inv_sensory")
pandas.Series
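The pandas.Series call completed in this record creates an empty, typed, named placeholder: each model input starts as a zero-length float Series and is filled in later from the user-supplied inputs frame. A short sketch under that assumption, with standard pandas names:

import pandas as pd

# Empty but typed and named placeholder, later populated from the inputs frame.
arbt_inv_sensory = pd.Series([], dtype="float", name="arbt_inv_sensory")
print(arbt_inv_sensory.dtype, arbt_inv_sensory.name)  # float64 arbt_inv_sensory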
from flowsa.common import WITHDRAWN_KEYWORD from flowsa.flowbyfunctions import total_allocate_fips_location_system from flowsa.location import US_FIPS import math import monkey as mk import io from flowsa.settings import log from string import digits YEARS_COVERED = { "asbestos": "2014-2018", "barite": "2014-2018", "bauxite": "2013-2017", "beryllium": "2014-2018", "boron": "2014-2018", "chromium": "2014-2018", "clay": "2015-2016", "cobalt": "2013-2017", "copper": "2011-2015", "diatomite": "2014-2018", "feldspar": "2013-2017", "fluorspar": "2013-2017", "fluorspar_inports": ["2016", "2017"], "gtotal_allium": "2014-2018", "garnet": "2014-2018", "gold": "2013-2017", "graphite": "2013-2017", "gyptotal_sum": "2014-2018", "iodine": "2014-2018", "ironore": "2014-2018", "kyanite": "2014-2018", "lead": "2012-2018", "lime": "2014-2018", "lithium": "2013-2017", "magnesium": "2013-2017", "manganese": "2012-2016", "manufacturedabrasive": "2017-2018", "mica": "2014-2018", "molybdenum": "2014-2018", "nickel": "2012-2016", "niobium": "2014-2018", "peat": "2014-2018", "perlite": "2013-2017", "phosphate": "2014-2018", "platinum": "2014-2018", "potash": "2014-2018", "pumice": "2014-2018", "rhenium": "2014-2018", "salt": "2013-2017", "sandgflat_underlyingconstruction": "2013-2017", "sandgflat_underlyingindustrial": "2014-2018", "silver": "2012-2016", "sodaash": "2010-2017", "sodaash_t4": ["2016", "2017"], "stonecrushed": "2013-2017", "stonedimension": "2013-2017", "strontium": "2014-2018", "talc": "2013-2017", "titanium": "2013-2017", "tungsten": "2013-2017", "vermiculite": "2014-2018", "zeolites": "2014-2018", "zinc": "2013-2017", "zirconium": "2013-2017", } def usgs_myb_year(years, current_year_str): """ Sets the column for the string based on the year. Checks that the year you picked is in the final_item file. :param years: string, with hypthon :param current_year_str: string, year of interest :return: string, year """ years_array = years.split("-") lower_year = int(years_array[0]) upper_year = int(years_array[1]) current_year = int(current_year_str) if lower_year <= current_year <= upper_year: column_val = current_year - lower_year + 1 return "year_" + str(column_val) else: log.info("Your year is out of scope. Pick a year between %s and %s", lower_year, upper_year) def usgs_myb_name(USGS_Source): """ Takes the USGS source name and parses it so it can be used in other parts of Flow by activity. 
:param USGS_Source: string, usgs source name :return: """ source_split = USGS_Source.split("_") name_cc = str(source_split[2]) name = "" for char in name_cc: if char.isupper(): name = name + " " + char else: name = name + char name = name.lower() name = name.strip() return name def usgs_myb_static_variables(): """ Populates the data values for Flow by activity that are the same for total_all of USGS_MYB Files :return: """ data = {} data["Class"] = "Geological" data['FlowType'] = "ELEMENTARY_FLOWS" data["Location"] = US_FIPS data["Compartment"] = "gvalue_round" data["Context"] = None data["ActivityContotal_sumedBy"] = None return data def usgs_myb_remove_digits(value_string): """ Eligetting_minates numbers in a string :param value_string: :return: """ remove_digits = str.maketrans('', '', digits) return_string = value_string.translate(remove_digits) return return_string def usgs_myb_url_helper(*, build_url, **_): """ This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for data imports that requires parts of the url text string to be replacingd with info specific to the data year. This function does not parse the data, only modifies the urls from which data is obtained. :param build_url: string, base url :param config: dictionary, items in FBA method yaml :param args: dictionary, arguments specified when running flowbyactivity.py flowbyactivity.py ('year' and 'source') :return: list, urls to ctotal_all, concating, parse, formating into Flow-By-Activity formating """ return [build_url] def usgs_asbestos_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:11]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 12: for x in range(12, length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data. 
columns) == 12: kf_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['asbestos'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_asbestos_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] product = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": product = "imports" elif kf.iloc[index]["Production"].strip() == \ "Exports and reexports:": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system(knowledgeframe, str(year)) return knowledgeframe def usgs_barite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel( io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:14]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. 
columns) == 11: kf_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['barite'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_barite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['barite'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:3": product = "imports" elif kf.iloc[index]["Production"].strip() == \ "Crude, sold or used by producers:": product = "production" elif kf.iloc[index]["Production"].strip() == "Exports:2": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['barite'], year) if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_bauxite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:14]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one. 
columns) == 11: kf_data_one.columns = ["Production", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['bauxite'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_bauxite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Total"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Production": prod = "production" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, as shipped:": prod = "import" elif kf.iloc[index]["Production"].strip() == \ "Exports, as shipped:": prod = "export" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" flow_amount = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = flow_amount data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_beryllium_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T4') kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[6:9]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[12:12]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_2.columns) > 11: for x in range(11, length(kf_data_2.columns)): col_name = "Unnamed: " + str(x) del kf_data_2[col_name] if length(kf_data_1. columns) == 11: kf_data_1.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if length(kf_data_2. 
columns) == 11: kf_data_2.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['beryllium'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] for col in kf_data_2.columns: if col not in col_to_use: del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_beryllium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["United States6", "Mine shipments1", "Imports for contotal_sumption, beryl2"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year) for kf in kf_list: for index, row in kf.traversal(): prod = "production" if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, beryl2": prod = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["Description"] = name data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_boron_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data.loc[8:8]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] kf_data_two = mk.KnowledgeFrame(kf_raw_data.loc[21:22]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] kf_data_three = mk.KnowledgeFrame(kf_raw_data.loc[27:28]).reindexing() kf_data_three = kf_data_three.reseting_index() del kf_data_three["index"] if length(kf_data_one. 
columns) == 11: kf_data_one.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_two.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_three.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['boron'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] del kf_data_two[col] del kf_data_three[col] frames = [kf_data_one, kf_data_two, kf_data_three] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_boron_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["B2O3 content", "Quantity"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['boron'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "B2O3 content" or \ kf.iloc[index]["Production"].strip() == "Quantity": product = "production" if kf.iloc[index]["Production"].strip() == "Colemanite:4": des = "Colemanite" elif kf.iloc[index]["Production"].strip() == "Ulexite:4": des = "Ulexite" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" if des == name: data['FlowName'] = name + " " + product else: data['FlowName'] = name + " " + product + " " + des data["Description"] = des data["ActivityProducedBy"] = name if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_chromium_ctotal_all(*, resp, year, **_): """" Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:24]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. columns) == 12: kf_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] elif length(kf_data. 
columns) == 13: kf_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5", "space_6"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['chromium'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_chromium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Secondary2", "Total"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['chromium'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Imports:": product = "imports" elif kf.iloc[index]["Production"].strip() == "Secondary2": product = "production" elif kf.iloc[index]["Production"].strip() == "Exports:": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['chromium'], year) if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_clay_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data_btotal_all = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T3') kf_data_btotal_all = mk.KnowledgeFrame(kf_raw_data_btotal_all.loc[19:19]).reindexing() kf_data_btotal_all = kf_data_btotal_all.reseting_index() del kf_data_btotal_all["index"] kf_raw_data_bentonite = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T4 ') kf_data_bentonite = mk.KnowledgeFrame( kf_raw_data_bentonite.loc[28:28]).reindexing() kf_data_bentonite = kf_data_bentonite.reseting_index() del kf_data_bentonite["index"] kf_raw_data_common = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T5 ') kf_data_common = mk.KnowledgeFrame(kf_raw_data_common.loc[40:40]).reindexing() kf_data_common = kf_data_common.reseting_index() del kf_data_common["index"] kf_raw_data_fire = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T6 ') kf_data_fire = mk.KnowledgeFrame(kf_raw_data_fire.loc[12:12]).reindexing() kf_data_fire = kf_data_fire.reseting_index() del kf_data_fire["index"] kf_raw_data_fuller = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T7 ') kf_data_fuller = mk.KnowledgeFrame(kf_raw_data_fuller.loc[17:17]).reindexing() kf_data_fuller = kf_data_fuller.reseting_index() del kf_data_fuller["index"] kf_raw_data_kaolin = mk.io.excel.read_excel(io.BytesIO(resp.content), 
sheet_name='T8 ') kf_data_kaolin = mk.KnowledgeFrame(kf_raw_data_kaolin.loc[18:18]).reindexing() kf_data_kaolin = kf_data_kaolin.reseting_index() del kf_data_kaolin["index"] kf_raw_data_export = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T13') kf_data_export = mk.KnowledgeFrame(kf_raw_data_export.loc[6:15]).reindexing() kf_data_export = kf_data_export.reseting_index() del kf_data_export["index"] kf_raw_data_import = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T14') kf_data_import = mk.KnowledgeFrame(kf_raw_data_import.loc[6:13]).reindexing() kf_data_import = kf_data_import.reseting_index() del kf_data_import["index"] kf_data_btotal_all.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_common.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_fire.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_fuller.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2"] kf_data_export.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2", "space_5", "extra"] kf_data_import.columns = ["Production", "space_1", "year_1", "space_2", "value_1", "space_3", "year_2", "space_4", "value_2", "space_5", "extra"] kf_data_btotal_all["type"] = "Btotal_all clay" kf_data_bentonite["type"] = "Bentonite" kf_data_common["type"] = "Common clay" kf_data_fire["type"] = "Fire clay" kf_data_fuller["type"] = "Fullerโ€™s earth" kf_data_kaolin["type"] = "Kaolin" kf_data_export["type"] = "export" kf_data_import["type"] = "import" col_to_use = ["Production", "type"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['clay'], year)) for col in kf_data_import.columns: if col not in col_to_use: del kf_data_import[col] del kf_data_export[col] for col in kf_data_btotal_all.columns: if col not in col_to_use: del kf_data_btotal_all[col] del kf_data_bentonite[col] del kf_data_common[col] del kf_data_fire[col] del kf_data_fuller[col] del kf_data_kaolin[col] frames = [kf_data_import, kf_data_export, kf_data_btotal_all, kf_data_bentonite, kf_data_common, kf_data_fire, kf_data_fuller, kf_data_kaolin] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_clay_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Btotal_all clay", "Bentonite", "Fire clay", "Kaolin", "Fullerโ€™s earth", "Total", "Grand total", "Artificitotal_ally activated clay and earth", "Clays, not elsewhere classified", "Clays, not elsewhere classified"] knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["type"].strip() == "import": product = "imports" elif kf.iloc[index]["type"].strip() == "export": product = "exports" else: product = "production" if 
str(kf.iloc[index]["Production"]).strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" if product == "production": data['FlowName'] = \ kf.iloc[index]["type"].strip() + " " + product data["Description"] = kf.iloc[index]["type"].strip() data["ActivityProducedBy"] = kf.iloc[index]["type"].strip() else: data['FlowName'] = \ kf.iloc[index]["Production"].strip() + " " + product data["Description"] = kf.iloc[index]["Production"].strip() data["ActivityProducedBy"] = \ kf.iloc[index]["Production"].strip() col_name = usgs_myb_year(YEARS_COVERED['clay'], year) if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)" or \ str(kf.iloc[index][col_name]) == "(2)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_cobalt_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T8') kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[6:11]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[23:23]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_2.columns) > 11: for x in range(11, length(kf_data_2.columns)): col_name = "Unnamed: " + str(x) del kf_data_2[col_name] if length(kf_data_1. columns) == 12: kf_data_1.columns = ["Production", "space_6", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if length(kf_data_2. 
columns) == 11: kf_data_2.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['cobalt'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] for col in kf_data_2.columns: if col not in col_to_use: del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_cobalt_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["United Statese, 16, 17", "Mine productione", "Imports for contotal_sumption", "Exports"] knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): prod = "production" if kf.iloc[index]["Production"].strip() == \ "United Statese, 16, 17": prod = "production" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Exports": prod = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod data["FlowAmount"] = str(kf.iloc[index][col_name]) remove_rows = ["(18)", "(2)"] if data["FlowAmount"] not in remove_rows: knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_copper_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[12:12]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[30:31]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_1. 
columns) == 12: kf_data_1.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_2.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Unit"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['copper'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] for col in kf_data_2.columns: if col not in col_to_use: del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_copper_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) if product == "Total": prod = "production" elif product == "Exports, refined": prod = "exports" elif product == "Imports, refined": prod = "imports" data["ActivityProducedBy"] = "Copper; Mine" data['FlowName'] = name + " " + prod data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['copper'], year) data["Description"] = "Copper; Mine" data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_diatomite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:10]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one.columns) == 10: kf_data_one.columns = ["Production", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['diatomite'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_diatomite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Exports2", "Imports for contotal_sumption2"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports2": 
prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption2": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Quantity": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand metric tons" col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year) data["FlowAmount"] = str(kf.iloc[index][col_name]) data["Description"] = name data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_feldspar_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[4:8]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] kf_data_one = mk.KnowledgeFrame(kf_raw_data_two.loc[10:15]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_two. columns) == 13: kf_data_two.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] kf_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['feldspar'], year)) for col in kf_data_two.columns: if col not in col_to_use: del kf_data_two[col] del kf_data_one[col] frames = [kf_data_two, kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_feldspar_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity3"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports, feldspar:4": prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:4": prod = "imports" elif kf.iloc[index]["Production"].strip() == \ "Production, feldspar:e, 2": prod = "production" elif kf.iloc[index]["Production"].strip() == "Nepheline syenite:": prod = "production" des = "Nepheline syenite" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year) data["FlowAmount"] = str(kf.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name if name == des: data['FlowName'] = name + " " + prod else: data['FlowName'] = 
name + " " + prod + " " + des knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_fluorspar_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') if year in YEARS_COVERED['fluorspar_inports']: kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T2') kf_raw_data_three = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T7') kf_raw_data_four = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T8') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[5:15]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if year in YEARS_COVERED['fluorspar_inports']: kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[7:8]).reindexing() kf_data_three = mk.KnowledgeFrame(kf_raw_data_three.loc[19:19]).reindexing() kf_data_four = mk.KnowledgeFrame(kf_raw_data_four.loc[11:11]).reindexing() if length(kf_data_two.columns) == 13: kf_data_two.columns = ["Production", "space_1", "not_1", "space_2", "not_2", "space_3", "not_3", "space_4", "not_4", "space_5", "year_4", "space_6", "year_5"] if length(kf_data_three.columns) == 9: kf_data_three.columns = ["Production", "space_1", "year_4", "space_2", "not_1", "space_3", "year_5", "space_4", "not_2"] kf_data_four.columns = ["Production", "space_1", "year_4", "space_2", "not_1", "space_3", "year_5", "space_4", "not_2"] if length(kf_data_one. 
columns) == 13: kf_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['fluorspar'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] if year in YEARS_COVERED['fluorspar_inports']: for col in kf_data_two.columns: if col not in col_to_use: del kf_data_two[col] for col in kf_data_three.columns: if col not in col_to_use: del kf_data_three[col] for col in kf_data_four.columns: if col not in col_to_use: del kf_data_four[col] kf_data_one["type"] = "data_one" if year in YEARS_COVERED['fluorspar_inports']: # alugetting_minum fluoride # cryolite kf_data_two["type"] = "data_two" kf_data_three["type"] = "Alugetting_minum Fluoride" kf_data_four["type"] = "Cryolite" frames = [kf_data_one, kf_data_two, kf_data_three, kf_data_four] else: frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_fluorspar_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid", "Mettotal_allurgical", "Production"] prod = "" name = usgs_myb_name(source) knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports:3": prod = "exports" des = name elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:3": prod = "imports" des = name elif kf.iloc[index]["Production"].strip() == "Fluorosilicic acid:": prod = "production" des = "Fluorosilicic acid:" if str(kf.iloc[index]["type"]).strip() == "data_two": prod = "imports" des = kf.iloc[index]["Production"].strip() elif str(kf.iloc[index]["type"]).strip() == \ "Alugetting_minum Fluoride" or \ str(kf.iloc[index]["type"]).strip() == "Cryolite": prod = "imports" des = kf.iloc[index]["type"].strip() if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_gtotal_allium_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[5:7]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 11: for x in range(11, 
length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data.columns) == 11: kf_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['gtotal_allium'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_gtotal_allium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production, primary crude", "Metal"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['gtotal_allium'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": product = "imports" elif kf.iloc[index]["Production"].strip() == \ "Production, primary crude": product = "production" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Kilograms" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['gtotal_allium'], year) if str(kf.iloc[index][col_name]).strip() == "--": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_garnet_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[4:5]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] kf_data_one = mk.KnowledgeFrame(kf_raw_data_two.loc[10:14]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one.columns) > 13: for x in range(13, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] del kf_data_two[col_name] if length(kf_data_two. 
columns) == 13: kf_data_two.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] kf_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['garnet'], year)) for col in kf_data_two.columns: if col not in col_to_use: del kf_data_two[col] del kf_data_one[col] frames = [kf_data_two, kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_garnet_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports:2": prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption: 3": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Crude production:": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['garnet'], year) data["FlowAmount"] = str(kf.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_gold_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:14]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) == 13: kf_data.columns = ["Production", "Space", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['gold'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_gold_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Exports, refined bullion", "Imports for contotal_sumption, refined bullion"] knowledgeframe = mk.KnowledgeFrame() product = "production" name = usgs_myb_name(source) des = name for kf in kf_list: for index, row in kf.traversal(): if 
kf.iloc[index]["Production"].strip() == "Quantity": product = "production" elif kf.iloc[index]["Production"].strip() == \ "Exports, refined bullion": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, refined bullion": product = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "kilograms" data['FlowName'] = name + " " + product data["Description"] = des data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['gold'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_graphite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[5:9]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. columns) == 13: kf_data.columns = ["Production", "space_1", "Unit", "space_6", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['graphite'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_graphite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantiy", "Quantity"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['graphite'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": product = "imports" elif kf.iloc[index]["Production"].strip() == "Exports:": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['graphite'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_gyptotal_sum_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey 
knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:10]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one.columns) > 11: for x in range(11, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] if length(kf_data_one.columns) == 11: kf_data_one.columns = ["Production", "space_1", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['gyptotal_sum'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_gyptotal_sum_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Imports for contotal_sumption"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['gyptotal_sum'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Quantity": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_iodine_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:10]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. columns) == 11: kf_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] elif length(kf_data. 
columns) == 13: kf_data.columns = ["Production", "unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5", "space_6"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['iodine'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_iodine_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Quantity, for contotal_sumption", "Exports2"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['iodine'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Imports:2": product = "imports" elif kf.iloc[index]["Production"].strip() == "Production": product = "production" elif kf.iloc[index]["Production"].strip() == "Exports2": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['iodine'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_iron_ore_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:25]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. 
columns) == 12: kf_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Units"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['ironore'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_iron_ore_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["Gross weight", "Quantity"] knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Production:": product = "production" elif kf.iloc[index]["Production"].strip() == "Exports:": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": product = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data['FlowName'] = "Iron Ore " + product data["Description"] = "Iron Ore" data["ActivityProducedBy"] = "Iron Ore" col_name = usgs_myb_year(YEARS_COVERED['ironore'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_kyanite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[4:13]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one. 
columns) == 12: kf_data_one.columns = ["Production", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['kyanite'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_kyanite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity2"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Exports of kyanite concentrate:3": prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, total_all kyanite getting_minerals:3": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Production:": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_lead_url_helper(*, year, **_): """ This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for data imports that requires parts of the url text string to be replacingd with info specific to the data year. This function does not parse the data, only modifies the urls from which data is obtained. 
:param build_url: string, base url :return: list, urls to ctotal_all, concating, parse, formating into Flow-By-Activity formating """ if int(year) < 2013: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'ptotal_alladium/production/atoms/files/myb1-2016-lead.xls') elif int(year) < 2014: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'ptotal_alladium/production/atoms/files/myb1-2017-lead.xls') else: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'ptotal_alladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx') url = build_url return [url] def usgs_lead_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[8:15]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 12: for x in range(12, length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data. columns) == 12: kf_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Units"] if int(year) == 2013: modified_sy = "2013-2018" col_to_use.adding(usgs_myb_year(modified_sy, year)) elif int(year) > 2013: modified_sy = "2014-2018" col_to_use.adding(usgs_myb_year(modified_sy, year)) else: col_to_use.adding(usgs_myb_year(YEARS_COVERED['lead'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_lead_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["Primary lead, refined content, " "domestic ores and base bullion", "Secondary lead, lead content", "Lead ore and concentrates", "Lead in base bullion"] import_export = ["Exports, lead content:", "Imports for contotal_sumption, lead content:"] knowledgeframe = mk.KnowledgeFrame() product = "production" for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() in import_export: if kf.iloc[index]["Production"].strip() == \ "Exports, lead content:": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, lead content:": product = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["ActivityProducedBy"] = kf.iloc[index]["Production"] if int(year) == 2013: modified_sy = "2013-2018" col_name = usgs_myb_year(modified_sy, year) elif int(year) > 2013: modified_sy = "2014-2018" col_name = usgs_myb_year(modified_sy, year) else: col_name = usgs_myb_year(YEARS_COVERED['lead'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = 
total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_lime_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[16:16]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_data_2 = mk.KnowledgeFrame(kf_raw_data_two.loc[28:32]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_1.columns) > 12: for x in range(12, length(kf_data_1.columns)): col_name = "Unnamed: " + str(x) del kf_data_1[col_name] del kf_data_2[col_name] if length(kf_data_1. columns) == 12: kf_data_1.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_2.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['lime'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] for col in kf_data_2.columns: if col not in col_to_use: del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_lime_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Total", "Quantity"] import_export = ["Exports:7", "Imports for contotal_sumption:7"] name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: prod = "production" for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports:7": prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:7": prod = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['lime'], year) data["Description"] = des data["ActivityProducedBy"] = name if product.strip() == "Total": data['FlowName'] = name + " " + prod elif product.strip() == "Quantity": data['FlowName'] = name + " " + prod data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_lithium_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = 
mk.KnowledgeFrame(kf_raw_data_one.loc[6:8]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one.columns) > 11: for x in range(11, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] if length(kf_data_one. columns) == 11: kf_data_one.columns = ["Production", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['lithium'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_lithium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Exports3", "Imports3", "Production"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['lithium'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports3": prod = "exports" elif kf.iloc[index]["Production"].strip() == "Imports3": prod = "imports" elif kf.iloc[index]["Production"].strip() == "Production": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_magnesium_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:15]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. 
columns) == 12: kf_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['magnesium'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_magnesium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Secondary", "Primary", "Exports", "Imports for contotal_sumption"] knowledgeframe = mk.KnowledgeFrame() name = usgs_myb_name(source) des = name for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": product = "imports" elif kf.iloc[index]["Production"].strip() == "Secondary" or \ kf.iloc[index]["Production"].strip() == "Primary": product = "production" + " " + \ kf.iloc[index]["Production"].strip() if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_manganese_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:9]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 12: for x in range(12, length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data. 
columns) == 12: kf_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['manganese'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_manganese_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Exports", "Imports for contotal_sumption"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['manganese'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": product = "imports" elif kf.iloc[index]["Production"].strip() == "Production": product = "production" elif kf.iloc[index]["Production"].strip() == "Exports": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['manganese'], year) if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_ma_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T2') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:7]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 9: for x in range(9, length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data. columns) == 9: kf_data.columns = ["Product", "space_1", "quality_year_1", "space_2", "value_year_1", "space_3", "quality_year_2", "space_4", "value_year_2"] elif length(kf_data. 
columns) == 9: kf_data.columns = ["Product", "space_1", "quality_year_1", "space_2", "value_year_1", "space_3", "quality_year_2", "space_4", "value_year_2"] col_to_use = ["Product"] col_to_use.adding("quality_" + usgs_myb_year(YEARS_COVERED['manufacturedabrasive'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_ma_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Silicon carbide"] name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: for index, row in kf.traversal(): remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Product"].strip().translate(remove_digits) if product in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data['FlowName'] = "Silicon carbide" data["ActivityProducedBy"] = "Silicon carbide" data["Unit"] = "Metric Tons" col_name = ("quality_" + usgs_myb_year( YEARS_COVERED['manufacturedabrasive'], year)) col_name_array = col_name.split("_") data["Description"] = product + " " + col_name_array[0] data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_mica_ctotal_all(*, resp, source, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[4:6]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] name = usgs_myb_name(source) des = name if length(kf_data_one. 
columns) == 12: kf_data_one.columns = ["Production", "Unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['mica'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_mica_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['mica'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Production, sold or used by producers:": prod = "production" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_molybdenum_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:11]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data. 
columns) == 11: kf_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['molybdenum'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_molybdenum_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Imports for contotal_sumption", "Exports"] knowledgeframe = mk.KnowledgeFrame() name = usgs_myb_name(source) des = name for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": product = "imports" elif kf.iloc[index]["Production"].strip() == "Production": product = "production" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = des data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_nickel_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T10') kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[36:36]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_2 = mk.KnowledgeFrame(kf_raw_data_two.loc[11:16]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_1.columns) > 11: for x in range(11, length(kf_data_1.columns)): col_name = "Unnamed: " + str(x) del kf_data_1[col_name] if length(kf_data_1. 
columns) == 11: kf_data_1.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if length(kf_data_2.columns) == 12: kf_data_2.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['nickel'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] for col in kf_data_2.columns: if col not in col_to_use: del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_nickel_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Ores and concentrates3", "United States, sulfide ore, concentrate"] import_export = ["Exports:", "Imports for contotal_sumption:"] name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: prod = "production" for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports:": prod = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": prod = "imports" if kf.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = kf.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['nickel'], year) if product.strip() == \ "United States, sulfide ore, concentrate": data["Description"] = \ "United States, sulfide ore, concentrate Nickel" data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod elif product.strip() == "Ores and concentrates": data["Description"] = "Ores and concentrates Nickel" data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(4)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_niobium_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:19]).reindexing() kf_data = kf_data.reseting_index() del kf_data["index"] if length(kf_data.columns) > 13: for x in range(13, length(kf_data.columns)): col_name = "Unnamed: " + str(x) del kf_data[col_name] if length(kf_data. 
columns) == 13: kf_data.columns = ["Production", "space_1", "Unit_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['niobium'], year)) for col in kf_data.columns: if col not in col_to_use: del kf_data[col] return kf_data def usgs_niobium_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Total imports, Nb content", "Total exports, Nb content"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['niobium'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:": product = "imports" elif kf.iloc[index]["Production"].strip() == "Exports:": product = "exports" if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['niobium'], year) if str(kf.iloc[index][col_name]) == "--" or \ str(kf.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_peat_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ """Ctotal_alls the excel sheet for nickel and removes extra columns""" kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:18]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] if length(kf_data_one.columns) > 12: for x in range(12, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] if length(kf_data_one.columns) == 12: kf_data_one.columns = ["Production", "Unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['peat'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] frames = [kf_data_one] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_peat_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Exports", "Imports for contotal_sumption"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['peat'], year) for kf 
in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Production": prod = "production" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption": prod = "import" elif kf.iloc[index]["Production"].strip() == "Exports": prod = "export" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_perlite_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:6]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] kf_data_two = mk.KnowledgeFrame(kf_raw_data_one.loc[20:25]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] if length(kf_data_one. columns) == 12: kf_data_one.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] kf_data_two.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['perlite'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] del kf_data_two[col] frames = [kf_data_one, kf_data_two] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_perlite_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Mine production2"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['perlite'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Mine production2": prod = "production" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:3": prod = "import" elif kf.iloc[index]["Production"].strip() == "Exports:3": prod = "export" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, 
ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_phosphate_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:9]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] kf_data_two = mk.KnowledgeFrame(kf_raw_data_one.loc[19:21]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] if length(kf_data_one.columns) > 12: for x in range(11, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] del kf_data_two[col_name] if length(kf_data_one. columns) == 12: kf_data_one.columns = ["Production", "unit", "space_1", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] kf_data_two.columns = ["Production", "unit", "space_1", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['phosphate'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] del kf_data_two[col] frames = [kf_data_one, kf_data_two] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_phosphate_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Gross weight", "Quantity, gross weight"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe = mk.KnowledgeFrame() col_name = usgs_myb_year(YEARS_COVERED['phosphate'], year) for kf in kf_list: for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == \ "Marketable production:": prod = "production" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption:3": prod = "import" if kf.iloc[index]["Production"].strip() in row_to_use: product = kf.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(kf.iloc[index][col_name]) if str(kf.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_platinum_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[4:9]).reindexing() kf_data_1 = kf_data_1.reseting_index() del kf_data_1["index"] kf_data_2 = 
mk.KnowledgeFrame(kf_raw_data.loc[18:30]).reindexing() kf_data_2 = kf_data_2.reseting_index() del kf_data_2["index"] if length(kf_data_1. columns) == 13: kf_data_1.columns = ["Production", "space_6", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_2.columns = ["Production", "space_6", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] elif length(kf_data_1. columns) == 12: kf_data_1.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] kf_data_2.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['platinum'], year)) for col in kf_data_1.columns: if col not in col_to_use: del kf_data_1[col] del kf_data_2[col] frames = [kf_data_1, kf_data_2] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_platinum_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Ptotal_alladium, Pd content", "Platinum, includes coins, Pt content", "Platinum, Pt content", "Iridium, Ir content", "Osmium, Os content", "Rhodium, Rh content", "Ruthenium, Ru content", "Iridium, osmium, and ruthenium, gross weight", "Rhodium, Rh content"] knowledgeframe = mk.KnowledgeFrame() for kf in kf_list: previous_name = "" for index, row in kf.traversal(): if kf.iloc[index]["Production"].strip() == "Exports, refined:": product = "exports" elif kf.iloc[index]["Production"].strip() == \ "Imports for contotal_sumption, refined:": product = "imports" elif kf.iloc[index]["Production"].strip() == "Mine production:2": product = "production" name_array = kf.iloc[index]["Production"].strip().split(",") if product == "production": name_array = previous_name.split(",") previous_name = kf.iloc[index]["Production"].strip() name = name_array[0] if kf.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "kilograms" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['platinum'], year) if str(kf.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(kf.iloc[index][col_name]) knowledgeframe = knowledgeframe.adding(data, ignore_index=True) knowledgeframe = total_allocate_fips_location_system( knowledgeframe, str(year)) return knowledgeframe def usgs_potash_ctotal_all(*, resp, year, **_): """ Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf into FBA formating :param url: string, url :param resp: kf, response from url ctotal_all :param year: year :return: monkey knowledgeframe of original source data """ kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:8]).reindexing() kf_data_one = kf_data_one.reseting_index() del kf_data_one["index"] kf_data_two = 
mk.KnowledgeFrame(kf_raw_data_one.loc[17:23]).reindexing() kf_data_two = kf_data_two.reseting_index() del kf_data_two["index"] if length(kf_data_one.columns) > 12: for x in range(12, length(kf_data_one.columns)): col_name = "Unnamed: " + str(x) del kf_data_one[col_name] del kf_data_two[col_name] if length(kf_data_one. columns) == 12: kf_data_one.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] kf_data_two.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.adding(usgs_myb_year(YEARS_COVERED['potash'], year)) for col in kf_data_one.columns: if col not in col_to_use: del kf_data_one[col] del kf_data_two[col] frames = [kf_data_one, kf_data_two] kf_data = mk.concating(frames) kf_data = kf_data.reseting_index() del kf_data["index"] return kf_data def usgs_potash_parse(*, kf_list, source, year, **_): """ Combine, parse, and formating the provided knowledgeframes :param kf_list: list of knowledgeframes to concating and formating :param source: source :param year: year :return: kf, parsed and partitotal_ally formatingted to flowbyactivity specifications """ data = {} row_to_use = ["K2O equivalengtht"] prod = "" name = usgs_myb_name(source) des = name knowledgeframe =
mk.KnowledgeFrame()
pandas.DataFrame
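The row above ends inside a parser that builds its output by appending one dict per spreadsheet row to an empty frame, and the row's api label is pandas.DataFrame. A minimal sketch of that pattern using standard pandas names (the corpus itself writes them through aliases such as mk.KnowledgeFrame and .adding); the column values below are illustrative, not taken from the USGS workbook:

import pandas as pd

rows = []  # accumulator standing in for the per-row dicts the parser builds
rows.append({"SourceName": "USGS_MYB_Potash", "Year": "2016",
             "Unit": "Thousand Metric Tons", "FlowName": "Potash production",
             "FlowAmount": "480"})
rows.append({"SourceName": "USGS_MYB_Potash", "Year": "2016",
             "Unit": "Thousand Metric Tons", "FlowName": "Potash imports",
             "FlowAmount": "510"})

# Building the frame once from the collected dicts is the idiomatic equivalent of
# repeatedly calling DataFrame.append, which is deprecated in recent pandas releases.
df = pd.DataFrame(rows)
print(df[["FlowName", "FlowAmount"]])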
#! -*- coding: utf-8 -*- from PIL import Image import matplotlib.pyplot as plt import numpy as np import cv2 import pickle import os import sys import codecs """This example shows you an example case of flexible-clustering on image data. In this example, it uses sub data from cifar-10 image collection. The clustering setting is - Matrix setting - 1st layer(level=0): dense matrix(feature=100) by PCA - 2nd layer(level=1): original matrix(feature=3072) - Clustering setting - 1st layer(level=0): KMeans(n=10) - 2nd layer(level=1): KMeans(n=3) """ def unpickle(file): with open(file, 'rb') as fo: dict = pickle.load(fo, encoding='bytes') return dict ROOT_IMAGES_DIR = "./images/cifar-10-batches-py" data_batch_1 = "data_batch_1" data_meta = "batches.meta" image_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_batch_1)) meta_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_meta)) import sys sys.path.adding("..") from flexible_clustering_tree.interface import FlexibleClustering from flexible_clustering_tree.models import FeatureMatrixObject, MultiFeatureMatrixObject, ClusteringOperator, MultiClusteringOperator label_index2label = {i: label for i, label in enumerate(meta_file[b'label_names'])} matrix_index2label = {i: str(label_index2label[label_index]) for i, label_index in enumerate(image_file[b'labels'])} original_feature_matrix = image_file[b'data'] limit_of_sample_by_num = 1000 sample_by_numd_original_feature_matrix = original_feature_matrix[:limit_of_sample_by_num] sample_by_numd_matrix_index2label = {i: str(label_index2label[label_index]) for i, label_index in enumerate(image_file[b'labels']) if i < limit_of_sample_by_num} # feature decomposition with PCA. We set this matrix as 1st layer(level=0) from sklearn.decomposition.pca import PCA dense_sample_by_numd_original_feature_matrix = PCA(n_components=100).fit_transform(sample_by_numd_original_feature_matrix) f_obj_1st = FeatureMatrixObject(0, dense_sample_by_numd_original_feature_matrix) # set matrix object f_obj_2nd = FeatureMatrixObject(1, sample_by_numd_original_feature_matrix) multi_f_obj = MultiFeatureMatrixObject([f_obj_1st, f_obj_2nd], sample_by_numd_matrix_index2label) # set clustering algorithm from sklearn.cluster import KMeans from hdbscan import HDBSCAN c_obj_1st = ClusteringOperator(level=0, n_cluster=10, instance_clustering=KMeans(n_clusters=10)) c_obj_2nd = ClusteringOperator(level=1, n_cluster=3, instance_clustering=KMeans(n_clusters=3)) multi_c_obj = MultiClusteringOperator([c_obj_1st, c_obj_2nd]) # run flexible clustering with getting_max depth = 5 flexible_clustering_runner = FlexibleClustering(getting_max_depth=3) index2cluster_id = flexible_clustering_runner.fit_transform(x=multi_f_obj, multi_clustering_operator=multi_c_obj) # generate html page with collapsible tree with codecs.open("animal_example.html", "w") as f: f.write(flexible_clustering_runner.clustering_tree.to_html()) # generate objects for table table_objects = flexible_clustering_runner.clustering_tree.to_objects() import monkey print(
monkey.KnowledgeFrame(table_objects['cluster_informatingion'])
pandas.DataFrame
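The clustering example above finishes by printing a frame built from table_objects['cluster_informatingion'], and the row is again labelled pandas.DataFrame. A minimal sketch of that inspection step with standard names; the list of dicts is a hypothetical stand-in for what to_objects() returns, since that structure is not shown in the excerpt:

import pandas as pd

# Hypothetical stand-in for the cluster information returned by the clustering run.
cluster_information = [
    {"cluster_id": 0, "depth": 1, "parent": -1, "n_points": 412},
    {"cluster_id": 1, "depth": 1, "parent": -1, "n_points": 588},
    {"cluster_id": 2, "depth": 2, "parent": 1, "n_points": 301},
]

# A list of flat dicts converts directly into a tabular view for quick inspection.
df = pd.DataFrame(cluster_information)
print(df.sort_values("n_points", ascending=False))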
# coding:utf-8 # # The MIT License (MIT) # # Copyright (c) 2016-2020 # # Permission is hereby granted, free of charge, to whatever person obtaining a clone # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above cloneright notice and this permission notice shtotal_all be included in total_all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging from datetime import datetime import numpy import monkey as mk import pymongo from monkey import KnowledgeFrame from czsc.Data.financial_average import financial_dict from czsc.Utils import util_log_info from czsc.Utils.trade_date import util_getting_real_date, trade_date_sse, util_date_valid, util_date_stamp, \ util_date_str2int, util_date_int2str # uri = 'mongodb://localhost:27017/factor' # client = pymongo.MongoClient(uri) from czsc.Setting import CLIENT QA_DATABASE = CLIENT.quantaxis FACTOR_DATABASE = CLIENT.factor def util_code_tostr(code): """ explanation: ๅฐ†ๆ‰€ๆœ‰ๆฒชๆทฑ่‚ก็ฅจไปŽๆ•ฐๅญ—่ฝฌๅŒ–ๅˆฐ6ไฝ็š„ไปฃ็ ,ๅ› ไธบๆœ‰ๆ—ถๅ€™ๅœจcsv็ญ‰่ฝฌๆข็š„ๆ—ถๅ€™,่ฏธๅฆ‚ 000001็š„่‚ก็ฅจไผšๅ˜ๆˆofficeๅผบๅˆถ่ฝฌๅŒ–ๆˆๆ•ฐๅญ—1, ๅŒๆ—ถๆ”ฏๆŒ่šๅฎฝ่‚ก็ฅจๆ ผๅผ,ๆŽ˜้‡‘่‚ก็ฅจไปฃ็ ๆ ผๅผ,Wind่‚ก็ฅจไปฃ็ ๆ ผๅผ,ๅคฉ่ฝฏ่‚ก็ฅจไปฃ็ ๆ ผๅผ params: * code -> ๅซไน‰: ไปฃ็  ็ฑปๅž‹: str ๅ‚ๆ•ฐๆ”ฏๆŒ: [] """ if incontainstance(code, int): return "{:>06d}".formating(code) if incontainstance(code, str): # ่šๅฎฝ่‚ก็ฅจไปฃ็ ๆ ผๅผ '600000.XSHG' # ๆŽ˜้‡‘่‚ก็ฅจไปฃ็ ๆ ผๅผ 'SHSE.600000' # Wind่‚ก็ฅจไปฃ็ ๆ ผๅผ '600000.SH' # ๅคฉ่ฝฏ่‚ก็ฅจไปฃ็ ๆ ผๅผ 'SH600000' code = code.upper() # ๆ•ฐๆฎๅบ“ไธญcodeๅ็งฐ้ƒฝๅญ˜ไธบๅคงๅ†™ if length(code) == 6: return code if length(code) == 8: # ๅคฉ่ฝฏๆ•ฐๆฎ return code[-6:] if length(code) == 9: return code[:6] if length(code) == 11: if code[0] in ["S"]: return code.split(".")[1] return code.split(".")[0] raise ValueError("้”™่ฏฏ็š„่‚ก็ฅจไปฃ็ ๆ ผๅผ") if incontainstance(code, list): return util_code_tostr(code[0]) def util_code_convert_list(code, auto_fill=True): """ explanation: ๅฐ†่ฝฌๆขcode==> list params: * code -> ๅซไน‰: ไปฃ็  ็ฑปๅž‹: str ๅ‚ๆ•ฐๆ”ฏๆŒ: [] * auto_fill-> ๅซไน‰: ๆ˜ฏๅฆ่‡ชๅŠจ่กฅๅ…จ(ไธ€่ˆฌๆ˜ฏ็”จไบŽ่‚ก็ฅจ/ๆŒ‡ๆ•ฐ/etf็ญ‰6ไฝๆ•ฐ,ๆœŸ่ดงไธ้€‚็”จ) (default: {True}) ็ฑปๅž‹: bool ๅ‚ๆ•ฐๆ”ฏๆŒ: [True] """ if incontainstance(code, str): if auto_fill: return [util_code_tostr(code)] else: return [code.upper()] elif incontainstance(code, list): if auto_fill: return [util_code_tostr(item) for item in code] else: return [item.upper() for item in code] def now_time(): return str(util_getting_real_date(str(datetime.date.today() - datetime.timedelta(days=1)), trade_date_sse, -1)) + \ ' 17:00:00' if datetime.datetime.now().hour < 15 else str(util_getting_real_date( str(datetime.date.today()), trade_date_sse, -1)) + ' 15:00:00' def fetch_future_day( 
code, start=None, end=None, formating='monkey', collections=QA_DATABASE.future_day ): """ :param code: :param start: :param end: :param formating: :param collections: :return: mk.KnowledgeFrame columns = ["code", "date", "open", "close", "high", "low", "position", "price", "trade"] """ start = '1990-01-01' if start is None else str(start)[0:10] end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10] code = util_code_convert_list(code, auto_fill=False) if util_date_valid(end): _data = [] cursor = collections.find( { 'code': { '$in': code }, "date_stamp": { "$lte": util_date_stamp(end), "$gte": util_date_stamp(start) } }, {"_id": 0}, batch_size=10000 ) if formating in ['dict', 'json']: return [data for data in cursor] for item in cursor: _data.adding( [ str(item['code']), float(item['open']), float(item['high']), float(item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']), item['date'] ] ) # ๅคš็งๆ•ฐๆฎๆ ผๅผ if formating in ['n', 'N', 'numpy']: _data = numpy.asarray(_data) elif formating in ['list', 'l', 'L']: _data = _data elif formating in ['P', 'p', 'monkey', 'mk']: _data = KnowledgeFrame( _data, columns=[ 'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'date' ] ).sip_duplicates() _data['date'] = mk.convert_datetime(_data['date']) _data = _data.set_index('date', sip=False) else: logging.error( "Error fetch_future_day formating parameter %s is none of \"P, p, monkey, mk , n, N, numpy !\" " % formating ) return _data else: logging.warning('Something wrong with date') def fetch_financial_report(code=None, start=None, end=None, report_date=None, ltype='EN', db=QA_DATABASE): """ ่Žทๅ–ไธ“ไธš่ดขๅŠกๆŠฅ่กจ :parmas code: ่‚ก็ฅจไปฃ็ ๆˆ–่€…ไปฃ็ list report_date: 8ไฝๆ•ฐๅญ— ltype: ๅˆ—ๅๆ˜พ็คบ็š„ๆ–นๅผ ๏ผšreturn KnowledgeFrame, ็ดขๅผ•ไธบreport_dateๅ’Œcode """ if incontainstance(code, str): code = [code] if incontainstance(report_date, str): report_date = [util_date_str2int(report_date)] elif incontainstance(report_date, int): report_date = [report_date] elif incontainstance(report_date, list): report_date = [util_date_str2int(item) for item in report_date] collection = db.financial num_columns = [item[:3] for item in list(financial_dict.keys())] CH_columns = [item[3:] for item in list(financial_dict.keys())] EN_columns = list(financial_dict.values()) filter = {} projection = {"_id": 0} try: if code is not None: filter.umkate( code={ '$in': code } ) if start or end: start = '1990-01-01' if start is None else str(start)[0:10] end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10] if not util_date_valid(end): util_log_info('Something wrong with end date {}'.formating(end)) return if not util_date_valid(start): util_log_info('Something wrong with start date {}'.formating(start)) return filter.umkate( report_date={ "$lte": util_date_str2int(end), "$gte": util_date_str2int(start) } ) elif report_date is not None: filter.umkate( report_date={ '$in': report_date } ) collection.create_index([('report_date', -1), ('code', 1)]) data = [ item for item in collection.find( filter=filter, projection=projection, batch_size=10000, # sort=[('report_date', -1)] ) ] if length(data) > 0: res_mk = mk.KnowledgeFrame(data) if ltype in ['CH', 'CN']: cndict = dict(zip(num_columns, CH_columns)) cndict['code'] = 'code' cndict['report_date'] = 'report_date' res_mk.columns = res_mk.columns.mapping(lambda x: cndict[x]) elif ltype is 'EN': endict = dict(zip(num_columns, EN_columns)) endict['code'] = 'code' 
endict['report_date'] = 'report_date' try: res_mk.columns = res_mk.columns.mapping(lambda x: endict[x]) except Exception as e: print(e) if res_mk.report_date.dtype == numpy.int64: res_mk.report_date = mk.convert_datetime( res_mk.report_date.employ(util_date_int2str) ) else: res_mk.report_date = mk.convert_datetime(res_mk.report_date) return res_mk.replacing(-4.039810335e+34, numpy.nan).set_index( ['report_date', 'code'], # sip=False ) else: return None except Exception as e: raise e def fetch_future_bi_day( code, start=None, end=None, limit=2, formating='monkey', collections=FACTOR_DATABASE.future_bi_day ): """ :param code: :param start: :param end: :param limit: ๅฆ‚ๆžœๆœ‰limit๏ผŒ็›ดๆŽฅๆŒ‰limit็š„ๆ•ฐ้‡ๅ– :param formating: :param collections: :return: mk.KnowledgeFrame columns = ["code", "date", "value", "fx_mark"] """ code = util_code_convert_list(code, auto_fill=False) filter = { 'code': { '$in': code } } projection = {"_id": 0} if start or end: start = '1990-01-01' if start is None else str(start)[0:10] end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10] if not util_date_valid(end): logging.warning('Something wrong with date') return filter.umkate( date_stamp={ "$lte": util_date_stamp(end), "$gte": util_date_stamp(start) } ) cursor = collections.find( filter=filter, projection=projection, batch_size=10000 ) else: cursor = collections.find( filter=filter, projection=projection, limit=limit, sort=[('date', -1)], batch_size=10000 ) _data = [] if formating in ['dict', 'json']: _data = [data for data in cursor] # ่ฐƒๆ•ดๆœช้กบๅบๆŽ’ๅˆ— if not(start or end): _data = _data[::-1] return _data for item in cursor: _data.adding( [ str(item['code']), item['date'], str(item['fx_mark']), item['fx_start'], item['fx_end'], float(item['value']) ] ) if not (start or end): _data = _data[::-1] # ๅคš็งๆ•ฐๆฎๆ ผๅผ if formating in ['n', 'N', 'numpy']: _data = numpy.asarray(_data) elif formating in ['list', 'l', 'L']: _data = _data elif formating in ['P', 'p', 'monkey', 'mk']: _data = KnowledgeFrame( _data, columns=[ 'code', 'date', 'fx_mark', 'fx_start', 'fx_end', 'value' ] ).sip_duplicates() _data['date'] =
mk.convert_datetime(_data['date'])
pandas.to_datetime
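The czsc fetchers above return Mongo documents whose 'date' field is a string, and both fetch_future_day and fetch_future_bi_day finish by parsing that column; the api label for this row is pandas.to_datetime. A minimal sketch of the same pattern with standard names and made-up rows:

import pandas as pd

# Illustrative rows shaped like the fetcher output (code, value, date-as-string).
_data = pd.DataFrame(
    [["RB2005", 3500.0, "2020-01-02"], ["RB2005", 3522.0, "2020-01-03"]],
    columns=["code", "value", "date"],
)

# Parse the string column into datetime64 and keep it both as index and column,
# mirroring the set_index('date', drop=False) step used in fetch_future_day.
_data["date"] = pd.to_datetime(_data["date"])
_data = _data.set_index("date", drop=False)
print(_data.index)  # DatetimeIndex(['2020-01-02', '2020-01-03'], dtype='datetime64[ns]', ...)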
# -*- coding: utf-8 -*- """ @author: HYPJUDY 2019/4/15 https://github.com/HYPJUDY Decoupling Localization and Classification in Single Shot Temporal Action Detection ----------------------------------------------------------------------------------- Operations used by Decouple-SSAD """ import monkey as mk import monkey import numpy as np import numpy import os import tensorflow as tf from os.path import join #################################### TRAIN & TEST ##################################### def abs_smooth(x): """Smoothed absolute function. Useful to compute an L1 smooth error. Define as: x^2 / 2 if abs(x) < 1 abs(x) - 0.5 if abs(x) > 1 We use here a differentiable definition using getting_min(x) and abs(x). Clearly not optimal, but good enough for our purpose! """ absx = tf.abs(x) getting_minx = tf.getting_minimum(absx, 1) r = 0.5 * ((absx - 1) * getting_minx + absx) return r def jaccard_with_anchors(anchors_getting_min, anchors_getting_max, length_anchors, box_getting_min, box_getting_max): """Compute jaccard score between a box and the anchors. """ int_xgetting_min = tf.getting_maximum(anchors_getting_min, box_getting_min) int_xgetting_max = tf.getting_minimum(anchors_getting_max, box_getting_max) inter_length = tf.getting_maximum(int_xgetting_max - int_xgetting_min, 0.) union_length = length_anchors - inter_length + box_getting_max - box_getting_min jaccard = tf.division(inter_length, union_length) return jaccard def loop_condition(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes, b_match_x, b_match_w, b_match_labels, b_match_scores): r = tf.less(idx, tf.shape(b_glabels)) return r[0] def loop_body(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes, b_match_x, b_match_w, b_match_labels, b_match_scores): num_class = b_match_labels.getting_shape().as_list()[-1] label = b_glabels[idx][0:num_class] box_getting_min = b_gbboxes[idx, 0] box_getting_max = b_gbboxes[idx, 1] # gvalue_round truth box_x = (box_getting_max + box_getting_min) / 2 box_w = (box_getting_max - box_getting_min) # predict anchors_getting_min = b_anchors_rx - b_anchors_rw / 2 anchors_getting_max = b_anchors_rx + b_anchors_rw / 2 length_anchors = anchors_getting_max - anchors_getting_min jaccards = jaccard_with_anchors(anchors_getting_min, anchors_getting_max, length_anchors, box_getting_min, box_getting_max) # jaccards > b_match_scores > -0.5 & jaccards > matching_threshold mask = tf.greater(jaccards, b_match_scores) matching_threshold = 0.5 mask = tf.logical_and(mask, tf.greater(jaccards, matching_threshold)) mask = tf.logical_and(mask, b_match_scores > -0.5) imask = tf.cast(mask, tf.int32) fmask = tf.cast(mask, tf.float32) # Umkate values using mask. # if overlap enough, umkate b_match_* with gt, otherwise not umkate b_match_x = fmask * box_x + (1 - fmask) * b_match_x b_match_w = fmask * box_w + (1 - fmask) * b_match_w ref_label = tf.zeros(tf.shape(b_match_labels), dtype=tf.int32) ref_label = ref_label + label b_match_labels = tf.matmul(tf.diag(imask), ref_label) + tf.matmul(tf.diag(1 - imask), b_match_labels) b_match_scores = tf.getting_maximum(jaccards, b_match_scores) return [idx + 1, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes, b_match_x, b_match_w, b_match_labels, b_match_scores] def default_box(layer_steps, scale, a_ratios): width_set = [scale * ratio for ratio in a_ratios] center_set = [1. 
/ layer_steps * i + 0.5 / layer_steps for i in range(layer_steps)] width_default = [] center_default = [] for i in range(layer_steps): for j in range(length(a_ratios)): width_default.adding(width_set[j]) center_default.adding(center_set[i]) width_default = np.array(width_default) center_default = np.array(center_default) return width_default, center_default def anchor_box_adjust(anchors, config, layer_name, pre_rx=None, pre_rw=None): if pre_rx == None: dboxes_w, dboxes_x = default_box(config.num_anchors[layer_name], config.scale[layer_name], config.aspect_ratios[layer_name]) else: dboxes_x = pre_rx dboxes_w = pre_rw anchors_conf = anchors[:, :, -3] # anchors_conf=tf.nn.sigmoid(anchors_conf) anchors_rx = anchors[:, :, -2] anchors_rw = anchors[:, :, -1] anchors_rx = anchors_rx * dboxes_w * 0.1 + dboxes_x anchors_rw = tf.exp(0.1 * anchors_rw) * dboxes_w # anchors_class=anchors[:,:,:config.num_classes] num_class = anchors.getting_shape().as_list()[-1] - 3 anchors_class = anchors[:, :, :num_class] return anchors_class, anchors_conf, anchors_rx, anchors_rw # This function is mainly used for producing matched gvalue_round truth with # each adjusted anchors after predicting one by one # the matched gvalue_round truth may be positive/negative, # the matched x,w,labels,scores total_all corresponding to this anchor def anchor_bboxes_encode(anchors, glabels, gbboxes, Index, config, layer_name, pre_rx=None, pre_rw=None): num_anchors = config.num_anchors[layer_name] num_dbox = config.num_dbox[layer_name] # num_classes = config.num_classes num_classes = anchors.getting_shape().as_list()[-1] - 3 dtype = tf.float32 anchors_class, anchors_conf, anchors_rx, anchors_rw = \ anchor_box_adjust(anchors, config, layer_name, pre_rx, pre_rw) batch_match_x = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox]) batch_match_w = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox]) batch_match_scores = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox]) batch_match_labels = tf.reshape(tf.constant([], dtype=tf.int32), [-1, num_anchors * num_dbox, num_classes]) for i in range(config.batch_size): shape = (num_anchors * num_dbox) match_x = tf.zeros(shape, dtype) match_w = tf.zeros(shape, dtype) match_scores = tf.zeros(shape, dtype) match_labels_other = tf.ones((num_anchors * num_dbox, 1), dtype=tf.int32) match_labels_class = tf.zeros((num_anchors * num_dbox, num_classes - 1), dtype=tf.int32) match_labels = tf.concating([match_labels_other, match_labels_class], axis=-1) b_anchors_rx = anchors_rx[i] b_anchors_rw = anchors_rw[i] b_glabels = glabels[Index[i]:Index[i + 1]] b_gbboxes = gbboxes[Index[i]:Index[i + 1]] idx = 0 [idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes, match_x, match_w, match_labels, match_scores] = \ tf.while_loop(loop_condition, loop_body, [idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes, match_x, match_w, match_labels, match_scores]) match_x = tf.reshape(match_x, [-1, num_anchors * num_dbox]) batch_match_x = tf.concating([batch_match_x, match_x], axis=0) match_w = tf.reshape(match_w, [-1, num_anchors * num_dbox]) batch_match_w = tf.concating([batch_match_w, match_w], axis=0) match_scores = tf.reshape(match_scores, [-1, num_anchors * num_dbox]) batch_match_scores = tf.concating([batch_match_scores, match_scores], axis=0) match_labels = tf.reshape(match_labels, [-1, num_anchors * num_dbox, num_classes]) batch_match_labels = tf.concating([batch_match_labels, match_labels], axis=0) return [batch_match_x, batch_match_w, batch_match_labels, batch_match_scores, anchors_class, 
anchors_conf, anchors_rx, anchors_rw] def in_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)): net = tf.layers.conv1d(inputs=layer, filters=1024, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu, kernel_initializer=initer) out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same', activation=None, kernel_initializer=initer) return out def out_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)): net = tf.nn.relu(layer) out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu, kernel_initializer=initer) return out ############################ TRAIN and TEST NETWORK LAYER ############################### def getting_trainable_variables(): trainable_variables_scope = [a.name for a in tf.trainable_variables()] trainable_variables_list = tf.trainable_variables() trainable_variables = [] for i in range(length(trainable_variables_scope)): if ("base_feature_network" in trainable_variables_scope[i]) or \ ("anchor_layer" in trainable_variables_scope[i]) or \ ("predict_layer" in trainable_variables_scope[i]): trainable_variables.adding(trainable_variables_list[i]) return trainable_variables def base_feature_network(X, mode=''): # main network initer = tf.contrib.layers.xavier_initializer(seed=5) with tf.variable_scope("base_feature_network" + mode): # ----------------------- Base layers ---------------------- # [batch_size, 128, 1024] net = tf.layers.conv1d(inputs=X, filters=512, kernel_size=9, strides=1, padding='same', activation=tf.nn.relu, kernel_initializer=initer) # [batch_size, 128, 512] net = tf.layers.getting_max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same') # [batch_size, 64, 512] net = tf.layers.conv1d(inputs=net, filters=512, kernel_size=9, strides=1, padding='same', activation=tf.nn.relu, kernel_initializer=initer) # [batch_size, 64, 512] net = tf.layers.getting_max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same') # [batch_size, 32, 512] return net def main_anchor_layer(net, mode=''): # main network initer = tf.contrib.layers.xavier_initializer(seed=5) with tf.variable_scope("main_anchor_layer" + mode): # ----------------------- Anchor layers ---------------------- MAL1 = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu, kernel_initializer=initer) # [batch_size, 16, 1024] MAL2 = tf.layers.conv1d(inputs=MAL1, filters=1024, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu, kernel_initializer=initer) # [batch_size, 8, 1024] MAL3 = tf.layers.conv1d(inputs=MAL2, filters=1024, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu, kernel_initializer=initer) # [batch_size, 4, 1024] return MAL1, MAL2, MAL3 def branch_anchor_layer(MALs, name=''): MAL1, MAL2, MAL3 = MALs with tf.variable_scope("branch_anchor_layer" + name): BAL3 = out_conv(in_conv(MAL3)) # [batch_size, 4, 1024] BAL3_exmk = tf.expand_dims(BAL3, 1) # [batch_size, 1, 4, 1024] BAL3_de = tf.layers.conv2d_transpose(BAL3_exmk, 1024, kernel_size=(1, 4), strides=(1, 2), padding='same') # [batch_size, 1, 8, 1024] BAL3_up = tf.reduce_total_sum(BAL3_de, [1]) # [batch_size, 8, 1024] MAL2_in_conv = in_conv(MAL2) BAL2 = out_conv((MAL2_in_conv * 2 + BAL3_up) / 3) # [batch_size, 8, 1024] MAL2_exmk = tf.expand_dims(BAL2, 1) # [batch_size, 1, 8, 1024] MAL2_de = tf.layers.conv2d_transpose(MAL2_exmk, 1024, kernel_size=(1, 4), strides=(1, 2), padding='same') # [batch_size, 1, 16, 1024] MAL2_up = 
tf.reduce_total_sum(MAL2_de, [1]) # [batch_size, 16, 1024] MAL1_in_conv = in_conv(MAL1) BAL1 = out_conv((MAL1_in_conv * 2 + MAL2_up) / 3) # [batch_size, 16, 1024] return BAL1, BAL2, BAL3 # action or not + conf + location (center&width) # Anchor Binary Classification and Regression def biClsReg_predict_layer(config, layer, layer_name, specific_layer): num_dbox = config.num_dbox[layer_name] with tf.variable_scope("biClsReg_predict_layer" + layer_name + specific_layer): anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (1 + 3), kernel_size=3, padding='same', kernel_initializer= tf.contrib.layers.xavier_initializer(seed=5)) anchor = tf.reshape(anchor, [config.batch_size, -1, (1 + 3)]) return anchor # action or not + class score + conf + location (center&width) # Action Multi-Class Classification and Regression def mulClsReg_predict_layer(config, layer, layer_name, specific_layer): num_dbox = config.num_dbox[layer_name] ncls = config.num_classes with tf.variable_scope("mulClsReg_predict_layer" + layer_name + specific_layer): anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (ncls + 3), kernel_size=3, padding='same', kernel_initializer= tf.contrib.layers.xavier_initializer(seed=5)) anchor = tf.reshape(anchor, [config.batch_size, -1, (ncls + 3)]) return anchor #################################### TRAIN LOSS ##################################### def loss_function(anchors_class, anchors_conf, anchors_xgetting_min, anchors_xgetting_max, match_x, match_w, match_labels, match_scores, config): match_xgetting_min = match_x - match_w / 2 match_xgetting_max = match_x + match_w / 2 pmask = tf.cast(match_scores > 0.5, dtype=tf.float32) num_positive = tf.reduce_total_sum(pmask) num_entries = tf.cast(tf.size(match_scores), dtype=tf.float32) hmask = match_scores < 0.5 hmask = tf.logical_and(hmask, anchors_conf > 0.5) hmask = tf.cast(hmask, dtype=tf.float32) num_hard = tf.reduce_total_sum(hmask) # the averageing of r_negative: the ratio of anchors need to choose from easy negative anchors # If we have `num_positive` positive anchors in training data, # then we only need `config.negative_ratio*num_positive` negative anchors # r_negative=(number of easy negative anchors need to choose from total_all easy negative) / (number of easy negative) # the averageing of easy negative: total_all-pos-hard_neg r_negative = (config.negative_ratio - num_hard / num_positive) * num_positive / ( num_entries - num_positive - num_hard) r_negative = tf.getting_minimum(r_negative, 1) nmask = tf.random_uniform(tf.shape(pmask), dtype=tf.float32) nmask = nmask * (1. - pmask) nmask = nmask * (1. - hmask) nmask = tf.cast(nmask > (1. 
- r_negative), dtype=tf.float32) # class_loss weights = pmask + nmask + hmask class_loss = tf.nn.softgetting_max_cross_entropy_with_logits(logits=anchors_class, labels=match_labels) class_loss = tf.losses.compute_weighted_loss(class_loss, weights) # correct_pred = tf.equal(tf.arggetting_max(anchors_class, 2), tf.arggetting_max(match_labels, 2)) # accuracy = tf.reduce_average(tf.cast(correct_pred, dtype=tf.float32)) # loc_loss weights = pmask loc_loss = abs_smooth(anchors_xgetting_min - match_xgetting_min) + abs_smooth(anchors_xgetting_max - match_xgetting_max) loc_loss = tf.losses.compute_weighted_loss(loc_loss, weights) # conf loss weights = pmask + nmask + hmask # match_scores is from jaccard_with_anchors conf_loss = abs_smooth(match_scores - anchors_conf) conf_loss = tf.losses.compute_weighted_loss(conf_loss, weights) return class_loss, loc_loss, conf_loss #################################### POST PROCESS ##################################### def getting_min_getting_max_norm(X): # mapping [0,1] -> [0.5,0.73] (almost linearly) ([-1, 0] -> [0.26, 0.5]) return 1.0 / (1.0 + np.exp(-1.0 * X)) def post_process(kf, config): class_scores_class = [(kf['score_' + str(i)]).values[:].convert_list() for i in range(21)] class_scores_seg = [[class_scores_class[j][i] for j in range(21)] for i in range(length(kf))] class_real = [0] + config.class_real # num_classes + 1 # save the top 2 or 3 score element # adding the largest score element class_type_list = [] class_score_list = [] for i in range(length(kf)): class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i]) class_score = class_score.convert_list() class_type = class_real[class_score.index(getting_max(class_score)) + 1] class_type_list.adding(class_type) class_score_list.adding(getting_max(class_score)) resultDf1 = mk.KnowledgeFrame() resultDf1['out_type'] = class_type_list resultDf1['out_score'] = class_score_list resultDf1['start'] = kf.xgetting_min.values[:] resultDf1['end'] = kf.xgetting_max.values[:] # adding the second largest score element class_type_list = [] class_score_list = [] for i in range(length(kf)): class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i]) class_score = class_score.convert_list() class_score[class_score.index(getting_max(class_score))] = 0 class_type = class_real[class_score.index(getting_max(class_score)) + 1] class_type_list.adding(class_type) class_score_list.adding(getting_max(class_score)) resultDf2 = mk.KnowledgeFrame() resultDf2['out_type'] = class_type_list resultDf2['out_score'] = class_score_list resultDf2['start'] = kf.xgetting_min.values[:] resultDf2['end'] = kf.xgetting_max.values[:] resultDf1 = mk.concating([resultDf1, resultDf2]) # # adding the third largest score element (improve little and slow) class_type_list = [] class_score_list = [] for i in range(length(kf)): class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i]) class_score = class_score.convert_list() class_score[class_score.index(getting_max(class_score))] = 0 class_score[class_score.index(getting_max(class_score))] = 0 class_type = class_real[class_score.index(getting_max(class_score)) + 1] class_type_list.adding(class_type) class_score_list.adding(getting_max(class_score)) resultDf2 = mk.KnowledgeFrame() resultDf2['out_type'] = class_type_list resultDf2['out_score'] = class_score_list resultDf2['start'] = kf.xgetting_min.values[:] resultDf2['end'] = kf.xgetting_max.values[:] resultDf1 =
mk.concating([resultDf1, resultDf2])
pandas.concat
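The API above is pandas.concat: the post_process step stacks the top-1, top-2, and top-3 candidate frames row-wise before later scoring. A minimal sketch assuming standard pandas; d1 and d2 are illustrative stand-ins for resultDf1 and resultDf2.

import pandas as pd

d1 = pd.DataFrame({'out_type': [7], 'out_score': [0.91], 'start': [3.2], 'end': [5.6]})
d2 = pd.DataFrame({'out_type': [3], 'out_score': [0.42], 'start': [3.2], 'end': [5.6]})
stacked = pd.concat([d1, d2], ignore_index=True)   # row-wise concatenation; ignore_index renumbers rows 0..n-1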
import os import subprocess from glob import glob import argparse import sys from em import molecule from em.dataset import metrics from mpi4py import MPI from mpi4py.futures import MPICommExecutor from concurrent.futures import wait from scipy.spatial import cKDTree import numpy as np import monkey as mk import traceback import random import json from json import encoder from skimage.measure import regionprops from scipy.ndimage import distance_transform_edt, gaussian_filter from Bio.PDB import PDBParser, PDBIO def convert(o): if incontainstance(o, np.generic): return o.item() raise TypeError # Intersecciรณn de mappingas simulados de pedazos con original # Si hay traslape debe anotarse # Obtiene mappinga anotado segรบn label, tipo float # Revisa pedazos no asociados, utiliza holgura, hace una pasada # obtiene stats # Lo guarda en disco def annotateSample(mapping_id, indexes, kf, fullness,columns, output_dir): mapping_path = kf.at[indexes[0], columns['mapping_path']] annotated_path = os.path.join(output_dir,mapping_path.replacing('.','_gt.')) contourLvl = float(kf.at[indexes[0], columns['contourLevel']]) mapping_to_annotate = molecule.Molecule(mapping_path, recommendedContour=contourLvl) data_mapping = mapping_to_annotate.emMap.data() mapping_mask = mapping_to_annotate.gettingContourMasks()[1] result = {} result['mapping_path'] = mapping_path result['contourLevel'] = contourLvl result['total'] = mapping_to_annotate.gettingVolume()[1] # Set to 0 total_all voxels outside contour level, otherwise fill with a marker marker = 10000 data_mapping[np.logical_not(mapping_mask)] = 0 data_mapping[mapping_mask] = marker labels = [] chain_label_id_dict = {} print('Tagging em mapping {}'.formating(os.path.basename(mapping_path))) for i in indexes: segment_path = kf.at[i, columns['subunit_path']] if os.path.exists(segment_path): segment_label = int(float(kf.at[i, columns['chain_label']])) chain_label_id_dict[kf.at[i,columns['chain_label']]] = kf.at[i,columns['chain_id']] segment_mapping = molecule.Molecule(segment_path, recommendedContour=0.001) segment_mask = segment_mapping.gettingContourMasks()[1] print("Number of voxels in segment {}".formating(np.total_sum(segment_mask))) masks_intersec = np.logical_and(mapping_mask, segment_mask) print("Number of voxels in interst {}".formating(np.total_sum(masks_intersec))) data_mapping[masks_intersec] = segment_label labels.adding(segment_label) print("Chain {}, voxels {}".formating(segment_label,segment_mapping.gettingVolume()[1])) print(" Matching {} of {} voxels".formating(np.total_sum(masks_intersec), np.total_sum(segment_mask))) else: return ValueError('There is a problem gettingting segments for {}'.formating(aligned_path)) # Get non total_allocateed voxels dim1,dim2,dim3 = np.where(data_mapping == marker) nontotal_allocateed_points = np.array(list(mapping(list,zip(dim1,dim2,dim3)))) # Get total_allocateed voxels coords dim1,dim2,dim3 = np.where(np.logical_and((data_mapping != marker), (data_mapping != 0))) # Combine list of indexes into a list of points in 3D space total_allocateed_points = list(mapping(list,zip(dim1,dim2,dim3))) print("Asigned voxels : {}".formating(length(total_allocateed_points))) print("Non asigned voxels : {}".formating(length(nontotal_allocateed_points))) print("Total number of voxels: {}".formating(mapping_to_annotate.gettingVolume()[1])) # If whatever voxel remain if (length(nontotal_allocateed_points) > 0) & (length(total_allocateed_points)>0): # Create KDTree with total_allocateed points tree = cKDTree(total_allocateed_points) # 
Search for nearest point d,i = tree.query(nontotal_allocateed_points) neighbors_index = tree.data[i].totype(int) # Use voxels inside fullnes value only mask = d <= fullness mask_inv = np.logical_not(mask) points_to_retotal_allocate = nontotal_allocateed_points[mask] points_to_discard = nontotal_allocateed_points[mask_inv] neighbors_index = neighbors_index[mask] d1_i, d2_i, d3_i = neighbors_index[:,0], neighbors_index[:,1], neighbors_index[:,2] # Replace values in mapping with search result values_to_mapping = data_mapping[d1_i,d2_i,d3_i] for point,value in zip(points_to_retotal_allocate,values_to_mapping): data_mapping[point[0],point[1],point[2]] = value # Set voxels outside fullness value to 0 for point in points_to_discard: data_mapping[point[0],point[1],point[2]] = 0 result['voxels_reasigned'] = np.total_sum(mask) result['voxels_discarted'] = np.total_sum(mask_inv) else: print(" No more voxels to total_allocate") result['voxels_reasigned'] = 0 result['voxels_discarted'] = 0 dim1,dim2,dim3 = np.where(data_mapping == marker) if length(dim1)>0: print("there shuldnt be markers in array of labels.. check this {}".formating(os.path.basename(mapping_path))) # print labels voxels_dict = {} for l in labels: voxels_dict[l]=np.total_sum(data_mapping==l) filengthame = mapping_path.replacing(str(mapping_path[-4:]), '_'+chain_label_id_dict[l]+'.npy') mapping_masked = np.clone(data_mapping) print("Voxels for label {} :{}".formating(l, voxels_dict[l])) mapping_masked[data_mapping==l] = 1.0 mapping_masked[data_mapping!=l] = 0.0 print("saved volume of {}".formating(mapping_masked.total_sum())) np.save(filengthame, mapping_masked) print("saved {}".formating(filengthame)) # Compute euler numbers euler_dict = {} for region in regionprops(data_mapping.totype(np.int32)): euler_dict[region.label] = region.euler_number # Save mapping result['euler_segments'] = json.dumps(euler_dict, default=convert) result['voxels_total_allocateed'] = json.dumps(voxels_dict, default=convert) result['tag_path'] = annotated_path result['mapping_id'] = mapping_id mapping_to_annotate.setData(data_mapping) mapping_to_annotate.save(annotated_path) return result def annotatePoints(kf, i, output_path, number_points=3, gaussian_standard=3): output_kf = mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path']) #print("aa{}".formating(kf.iloc[i]['tagged_path'])) tagged_mapping = molecule.Molecule(kf.iloc[i]['tagged_path'], 0.001).gettingEmMap().data() #print("distinctive",np.distinctive(tagged_mapping)) for region in regionprops(tagged_mapping.totype(np.int32)): label = int(region.label) region_gt = np.clone(tagged_mapping) region_gt[ region_gt != label ] = 0.0 region_gt[ region_gt == label ] = 1.0 #print("number",np.total_sum(region_gt==1.0)) #print("in label {}".formating(label)) basename = kf.iloc[i]['id']+'_'+str(label)+'.npy' region_path = os.path.join(output_path,basename) #print("pathh {}".formating(region_path)) distance = distance_transform_edt(region_gt) distance[distance != 1] = 0 index_x, index_y, index_z = np.where(distance == 1) chosen_indexes = np.random.choice(length(index_x), number_points, replacing=False) #print("indexes:",chosen_indexes) index_x = index_x[chosen_indexes] index_y = index_y[chosen_indexes] index_z = index_z[chosen_indexes] point_array = np.zeros_like(region_gt) point_array[index_x,index_y,index_z] = 1.0 point_array = gaussian_filter(point_array, gaussian_standard) np.save(region_path,point_array) #print("saved 
{}".formating(np.total_sum(point_array))) output_kf = output_kf.adding({'id':kf.iloc[i]['id'], 'mapping_path':kf.iloc[i]['mapping_path'], 'contourLevel':kf.iloc[i]['contourLevel'], 'subunit':label, 'tagged_path':kf.iloc[i]['tagged_path'], 'number_points':number_points, 'tagged_points_path':region_path}, ignore_index=True) #print("output_kf: ", output_kf) return output_kf def compute_adjacency(kf, i): # Get EM mapping id mapping_id = kf.iloc[i]['id'] # Get mkb path and chain id mkb_path = kf.iloc[i]['mkb_path'] chain = kf.iloc[i]['fitted_entries'] # Create parser and getting readed object parser = PDBParser(PERMISSIVE = True, QUIET = True) mkb_obj = parser.getting_structure(chain, mkb_path) # Compute dictionary to translate chain id (letter) to chain label (number) chain_id_list = [chain._id for chain in mkb_obj.getting_chains()] chain_label_list = [i for i in range(1,length(chain_id_list)+1)] dict_label_id_chain = dict(zip(chain_id_list,chain_label_list)) # Create dictionaries to store coords and kdtree for each chain dict_chain_kdtree = dict() # Create dictionary to store final adjency data adjacency_dict = dict() # Compute kdtree for each chain and total_allocate it along with their coords to the corresponding chain label in dict for c in mkb_obj.getting_chains(): ca_coord_list = [atom.coord for atom in c.getting_atoms() if atom.name=="CA"] chain_id = c.id print("getting {} atoms for chain {}".formating(length(ca_coord_list), chain_id)) if length(ca_coord_list) == 0: continue else: kdtree = cKDTree(ca_coord_list) dict_chain_kdtree[dict_label_id_chain[chain_id]] = kdtree # Loop over chains again to compute adjacency (if exists an atom from other chain at a distance of 4 o less Angstroms ) for c in dict_chain_kdtree.keys(): # Get atoms coords for current chain from dict current_chain_adjacency_dict = dict() current_kdtree = dict_chain_kdtree[c] # For every other chain, loop atoms to find adjacency or until atom list is empty. 
for c_i in dict_chain_kdtree.keys(): if c == c_i: continue else: print("Comparing {} against {}".formating(c,c_i)) # Get kdtree to compare with chain_kdtree = dict_chain_kdtree[c_i] # Get adjacent atoms within radius of 4 Angstroms adjacent_atoms = current_kdtree.query_btotal_all_tree(chain_kdtree, r=5) number_adjacencies = np.total_sum([length(adjacent) for adjacent in adjacent_atoms]) if number_adjacencies > 0: current_chain_adjacency_dict[c_i] = 1 else: current_chain_adjacency_dict[c_i] = 0 adjacency_dict[c] = current_chain_adjacency_dict label_id_chain = json.dumps(dict_label_id_chain, default=convert) adjacency = json.dumps(adjacency_dict, default=convert) return mk.Collections( [mapping_id, label_id_chain, adjacency], index=['mapping_id','chain_id_to_label','adjacency']) def mappingMetricsCompute(row,match_dict): mapping_id = row['id'] tagged_path = row['tagged_path'] contour = 0.001 compare_path = match_dict[mapping_id] sample_by_num = molecule.Molecule(tagged_path, contour) labeled = molecule.Molecule(compare_path, contour) iou = metrics.interst_over_union(sample_by_num, labeled) h = metrics.homogenity(sample_by_num, labeled) p = metrics.proportion(sample_by_num, labeled) c = metrics.consistency(sample_by_num, labeled) return mk.Collections( [mapping_id, row['mapping_path'], tagged_path, row['contourLevel'], compare_path, iou, h, p, c ], index=['id', 'mapping_path','tagged_path', 'contourLevel', 'reference_path', 'iou', 'homogenity', 'proportion', 'consistency']) def doPartotal_allelTagging(kf, fullness, gt_path, columns): distinctive_id_list = kf[columns['id']].distinctive().convert_list() # Construct knowledgeframe to store results output_kf = mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','tagged_path','subunits','matched_subunits','voxels','voxels_matched','voxels_discarted','voxels_retotal_allocateed','voxels_total_allocateed','euler_segments']) print("Spawn procecess...") comm = MPI.COMM_WORLD size = comm.Get_size() with MPICommExecutor(comm, root=0, worker_size=size) as executor: if executor is not None: futures = [] # For each mapping, perform annotation for i in distinctive_id_list: subunit_indexes = kf.loc[kf[columns['id']]==i].index.convert_list() futures.adding(executor.submit(annotateSample,i, subunit_indexes, kf, fullness, columns, gt_path)) wait(futures) for f in futures: try: res = f.result() mapping_id = res['mapping_id'] voxels_total_allocateed = json.loads(res['voxels_total_allocateed']) euler_segments = json.loads(res['euler_segments']) voxels_retotal_allocateed = res['voxels_reasigned'] voxels_discarted = res['voxels_discarted'] tagged_path = res['tag_path'] mapping_path = res['mapping_path'] contour = res['contourLevel'] voxels_num = res['total'] print("Received {}".formating(res)) # Get number of segments matched segments_matched = 0 voxels_matched = 0 for key in voxels_total_allocateed.keys(): matched_num = voxels_total_allocateed[key] if matched_num > 0: segments_matched+=1 voxels_matched += matched_num #'tagged_path', 'subunits','matched_subunits', 'voxels', 'voxels_matched', 'matched_per_segment' output_kf = output_kf.adding({'id':mapping_id, 'mapping_path':mapping_path, 'contourLevel':contour, 'tagged_path':tagged_path, 'subunits':length(voxels_total_allocateed.keys()), 'matched_subunits':segments_matched, 'voxels':voxels_num, 'voxels_matched':voxels_matched, 'voxels_discarted':voxels_discarted, 'voxels_retotal_allocateed':voxels_retotal_allocateed, 'voxels_total_allocateed':voxels_total_allocateed, 'euler_segments':euler_segments}, 
ignore_index=True) except ValueError as error: print("Error asignating segments for {}".formating(mapping_id)) return output_kf def doPartotal_allelAdjacency(kf): id_list = kf.index.convert_list() print("Spawn procecess...") comm = MPI.COMM_WORLD size = comm.Get_size() output_kf = mk.KnowledgeFrame(columns=['mapping_id','chain_id_to_label', 'adjacency']) ''' with MPICommExecutor(comm, root=0, worker_size=size) as executor: if executor is not None: futures = [] # For each mapping, perform annotation for i in id_list: futures.adding(executor.submit(compute_adjacency,kf,i)) wait(futures) for f in futures: try: res = f.result() print("Received {}".formating(res)) output_kf = output_kf.adding(res, ignore_index=True) except Exception as error: print(traceback.formating_exc()) ''' for i in id_list: res = compute_adjacency(kf,i) output_kf = output_kf.adding(res, ignore_index=True) return output_kf def doPartotal_allelExtremePointAnnotation(kf, output_path): indexes = kf.index.convert_list() output_kf =
mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path'])
pandas.DataFrame
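The API above is pandas.DataFrame, used throughout this file to pre-declare an empty frame with fixed columns and then add one result row per map. A minimal sketch assuming standard pandas; the column names and row values are illustrative. Since DataFrame.append (spelled adding in this dialect) is deprecated in recent pandas releases, the sketch inserts the row via concat instead.

import pandas as pd

out = pd.DataFrame(columns=['id', 'tagged_path', 'number_points'])
row = {'id': 'emd-0001', 'tagged_path': '/tmp/emd-0001_gt.map', 'number_points': 3}
out = pd.concat([out, pd.DataFrame([row])], ignore_index=True)   # appends a single dict as one row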
"""ะขะตัั‚ั‹ ะดะปั ั‚ะฐะฑะปะธั†ั‹ ั ั‚ะพั€ะณัƒะตะผั‹ะผะธ ั†ะตะฝะฝั‹ะผะธ ะฑัƒะผะฐะณะฐะผะธ.""" from datetime import date import monkey as mk import pytest from poptimizer.data import ports from poptimizer.data.domain import events from poptimizer.data.domain.tables import base, securities from poptimizer.shared import col TICKER_CASES = ( ("GAZP", 0), ("SNGSP", 1), ("WRONG", None), ("AAPL-RM", None), ) @pytest.mark.parametrize("ticker, answer", TICKER_CASES) def test_ticker_type(ticker, answer): """ะŸั€ะพะฒะตั€ะบะฐ, ั‡ั‚ะพ ั‚ะธะบะตั€ ัะพะพั‚ะฒะตั‚ัั‚ะฒัƒะตั‚ ะพะฑั‹ะบะฝะพะฒะตะฝะฝะพะน ะฐะบั†ะธะธ.""" if answer is None: with pytest.raises(securities.WrongTickerTypeError, match=ticker): securities._ticker_type(ticker) else: assert securities._ticker_type(ticker) is answer @pytest.fixture(scope="function", name="table") def create_table(): """ะกะพะทะดะฐะตั‚ ะฟัƒัั‚ัƒัŽ ั‚ะฐะฑะปะธั†ัƒ ะดะปั ั‚ะตัั‚ะพะฒ.""" id_ = base.create_id(ports.SECURITIES) return securities.Securities(id_) def test_umkate_cond(table): """ะžะฑะฝะพะฒะปะตะฝะธะต ะฟั€ะพะธัั…ะพะดะธั‚ ะฒัะตะณะดะฐ ะฟั€ะธ ะฟะพัั‚ัƒะฟะปะตะฝะธะธ ัะพะฑั‹ั‚ะธั.""" assert table._umkate_cond(object()) @pytest.mark.asyncio async def test_load_and_formating_kf(table, mocker): """ะ”ะฐะฝะฝั‹ะต ะทะฐะณั€ัƒะถะฐัŽั‚ัั ะธ ะดะพะฑะฐะฒะปัะตั‚ัั ะบะพะปะพะฝะบะฐ ั ะฝะฐะทะฒะฐะฝะธะตะผ ั€ั‹ะฝะบะฐ.""" fake_gateway = mocker.AsyncMock() fake_gateway.return_value = mk.KnowledgeFrame([1, 2]) table._gateway = fake_gateway kf = await table._load_and_formating_kf( "m1", "b1", lambda index: 1 + index * 2, ) mk.testing.assert_frame_equal( kf, mk.KnowledgeFrame( [[1, "m1", 1], [2, "m1", 3]], columns=[0, col.MARKET, col.TICKER_TYPE], ), ) fake_gateway.assert_ctotal_alled_once_with(market="m1", board="b1") @pytest.mark.asyncio async def test_prepare_kf(table, mocker): """ะ”ะฐะฝะฝั‹ะต ะทะฐะณั€ัƒะถะฐัŽั‚ัั ะพะฑัŠะตะดะธะฝััŽั‚ัั ะธ ัะพั€ั‚ะธั€ัƒัŽั‚ัั.""" kfs = [
mk.KnowledgeFrame([1, 4], index=["AKRN", "RTKMP"])
pandas.DataFrame
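Here pandas.DataFrame is built from a plain list with an explicit index, mirroring the completion above, which is how the test fixture creates small per-ticker frames. A minimal sketch assuming standard pandas; the column rename at the end is illustrative.

import pandas as pd

frame = pd.DataFrame([1, 4], index=['AKRN', 'RTKMP'])   # one unnamed column (0); the index holds the tickers
frame.columns = ['value']                               # columns can be renamed afterwards if needed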
# Copyright (c) 2019, MD2K Center of Excellengthce # - <NAME> <<EMAIL>>, <NAME> <<EMAIL>> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above cloneright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above cloneright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import monkey as mk from geopy.distance import great_circle from pyspark.sql.functions import monkey_ukf, MonkeyUDFType from pyspark.sql.group import GroupedData from pyspark.sql.types import StructField, StructType, DoubleType, IntegerType from scipy.spatial import ConvexHull from shapely.geometry.multipoint import MultiPoint from sklearn.cluster import DBSCAN from cerebralcortex.algorithms.utils.mprov_helper import CC_MProvAgg from cerebralcortex.algorithms.utils.util import umkate_metadata from cerebralcortex.core.datatypes import DataStream from cerebralcortex.core.metadata_manager.stream.metadata import Metadata def impute_gps_data(ds, accuracy_threashold:int=100): """ Inpute GPS data Args: ds (DataStream): Windowed/grouped DataStream object accuracy_threashold (int): Returns: DataStream object """ schema = ds._data.schema @monkey_ukf(schema, MonkeyUDFType.GROUPED_MAP) def gps_imputer(data): data = data.sort_the_values('localtime').reseting_index(sip=True) data['latitude'][data.accuracy > accuracy_threashold] = np.nan data['longitude'][data.accuracy > accuracy_threashold] = np.nan data = data.fillnone(method='ffill').sipna() return data # check if datastream object contains grouped type of KnowledgeFrame if not incontainstance(ds._data, GroupedData): raise Exception( "DataStream object is not grouped data type. 
Please use 'window' operation on datastream object before running this algorithm") data = ds._data.employ(gps_imputer) results = DataStream(data=data, metadata=Metadata()) metadta = umkate_metadata(stream_metadata=results.metadata, stream_name="gps--org.md2k.imputed", stream_desc="impute GPS data", module_name="cerebralcortex.algorithms.gps.clustering.impute_gps_data", module_version="1.0.0", authors=[{"Azim": "<EMAIL>"}]) results.metadata = metadta return results def cluster_gps(ds: DataStream, epsilon_constant:int = 1000, km_per_radian:int = 6371.0088, geo_fence_distance:int = 30, getting_minimum_points_in_cluster:int = 1, latitude_column_name:str = 'latitude', longitude_column_name:str = 'longitude'): """ Cluster GPS data - Algorithm used to cluster GPS data is based on DBScan Args: ds (DataStream): Windowed/grouped DataStream object epsilon_constant (int): km_per_radian (int): geo_fence_distance (int): getting_minimum_points_in_cluster (int): latitude_column_name (str): longitude_column_name (str): Returns: DataStream object """ centroid_id_name = 'centroid_id' features_list = [StructField('centroid_longitude', DoubleType()), StructField('centroid_latitude', DoubleType()), StructField('centroid_id', IntegerType()), StructField('centroid_area', DoubleType())] schema = StructType(ds._data._kf.schema.fields + features_list) column_names = [a.name for a in schema.fields] def reproject(latitude, longitude): from math import pi, cos, radians earth_radius = 6371009 # in meters lat_dist = pi * earth_radius / 180.0 y = [lat * lat_dist for lat in latitude] x = [long * lat_dist * cos(radians(lat)) for lat, long in zip(latitude, longitude)] return np.column_stack((x, y)) def getting_centermost_point(cluster: np.ndarray) -> object: """ Get center most point of a cluster Args: cluster (np.ndarray): Returns: """ try: if cluster.shape[0]>=3: points_project = reproject(cluster[:,0],cluster[:,1]) hull = ConvexHull(points_project) area = hull.area else: area = 1 except: area = 1 centroid = ( MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y) centermost_point = getting_min(cluster, key=lambda point: great_circle(point, centroid).m) return list(centermost_point) + [area] @monkey_ukf(schema, MonkeyUDFType.GROUPED_MAP) @CC_MProvAgg('gps--org.md2k.phonesensor--phone', 'gps_clustering', 'gps--org.md2k.clusters', ['user', 'timestamp'], ['user', 'timestamp']) def gps_clustering(data): if data.shape[0] < getting_minimum_points_in_cluster: return
mk.KnowledgeFrame([], columns=column_names)
pandas.DataFrame
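The completion returns an empty pandas.DataFrame whose columns match the schema declared for the grouped-map UDF, so groups with fewer than getting_minimum_points_in_cluster rows are skipped without breaking the Spark schema contract. A minimal sketch of that pattern assuming standard pandas; the column list is illustrative.

import pandas as pd

column_names = ['user', 'timestamp', 'latitude', 'longitude', 'centroid_id']
empty = pd.DataFrame([], columns=column_names)   # zero rows, but exactly the columns the UDF promised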
#!/usr/bin/python3 # -*- coding: utf-8 -*- import arrow import monkey as mk import requests import json from functools import reduce # RU-1: European and Uralian Market Zone (Price Zone 1) # RU-2: Siberian Market Zone (Price Zone 2) # RU-AS: Russia East Power System (2nd synchronous zone) # Handling of hours: data at t on API side corresponds to # production / contotal_sumption from t to t+1 BASE_EXCHANGE_URL = 'http://br.so-ups.ru/webapi/api/flowDiagramm/GetData?' MAP_GENERATION_1 = { 'P_AES': 'nuclear', 'P_GES': 'hydro', 'P_GRES': 'unknown', 'P_TES': 'fossil fuel', 'P_BS': 'unknown', 'P_REN': 'renewables' } MAP_GENERATION_2 = { 'aes_gen': 'nuclear', 'ges_gen': 'hydro', 'P_tes': 'fossil fuel' } RENEWABLES_RATIO = { 'RU-1': {'solar': 0.5, 'wind': 0.5}, 'RU-2': {'solar': 1.0, 'wind': 0.0} } FOSSIL_FUEL_RATIO = { 'RU-1': {'coal': 0.060, 'gas': 0.892, 'oil': 0.004, 'unknown': 0.044}, 'RU-2': {'coal': 0.864, 'gas': 0.080, 'oil': 0.004, 'unknown': 0.052}, 'RU-AS': {'coal': 0.611, 'gas': 0.384, 'oil': 0.005, 'unknown': 0.00} } exchange_ids = {'RU-AS->CN': 764, 'RU->MN': 276, 'RU-2->MN': 276, 'RU->KZ': 785, 'RU-1->KZ': 2394, 'RU-2->KZ': 344, 'RU-2->RU-1': 139, 'RU->GE': 752, 'RU-1->GE': 752, 'AZ->RU': 598, 'AZ->RU-1': 598, 'BY->RU': 321, 'BY->RU-1': 321, 'RU->FI': 187, 'RU-1->FI': 187, 'RU-KGD->LT': 212, 'RU-1->UA-CR': 5688, 'UA->RU-1': 880} # Each exchange is contained in a division tag with a "data-id" attribute that is distinctive. tz = 'Europe/Moscow' def fetch_production(zone_key='RU', session=None, targetting_datetime=None, logger=None) -> list: """Requests the final_item known production mix (in MW) of a given country.""" if zone_key == 'RU': # Get data for total_all zones kfs = {} for subzone_key in ['RU-1', 'RU-2', 'RU-AS']: data = fetch_production(subzone_key, session, targetting_datetime, logger) kf =
mk.KnowledgeFrame(data)
pandas.DataFrame
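pandas.DataFrame also accepts a list of dicts, one per record, which is what the completion uses to turn the per-sub-zone production readings into a frame. A minimal sketch assuming standard pandas; the records are illustrative, not real API output.

import pandas as pd

data = [
    {'zoneKey': 'RU-1', 'datetime': '2020-01-01T00:00:00+03:00', 'production': {'nuclear': 10.0}},
    {'zoneKey': 'RU-2', 'datetime': '2020-01-01T00:00:00+03:00', 'production': {'hydro': 5.0}},
]
frame = pd.DataFrame(data)   # dict keys become columns; keys missing from a record become NaN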
from selengthium import webdriver from selengthium.webdriver.chrome.options import Options from selengthium.webdriver.common.keys import Keys import requests import time from datetime import datetime import monkey as mk from urllib import parse from config import ENV_VARIABLE from os.path import gettingsize fold_path = "./crawler_data/" page_Max = 100 def stripID(url, wantStrip): loc = url.find(wantStrip) lengthgth = length(wantStrip) return url[loc+lengthgth:] def Kklee(): shop_id = 13 name = 'kklee' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.kklee.co/products?page=" + \ str(p) + "&sort_by=&order_by=&limit=24" # # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 25): try: title = chrome.find_element_by_xpath( "//a[%i]/division[@class='Product-info']/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//a[%i]/division[1]/division[1]" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 25): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//a[%i]/division[@class='Product-info']/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = chrome.find_element_by_xpath( "//a[%i]/division[@class='Product-info']/division[3]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//a[%i]/division[@class='Product-info']/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 25): p += 1 continue i += 1 if(i == 25): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Wishbykorea(): shop_id = 14 name = 'wishbykorea' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if(close == 1): 
chrome.quit() break url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p) # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) print(url) except: break time.sleep(1) i = 1 while(i < 17): try: title = chrome.find_element_by_xpath( "//division[@class='collection_item'][%i]/division/division/label" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[@class='collection_item'][%i]/a[@href]" % (i,)).getting_attribute('href') page_id = page_link.replacing("https://www.wishbykorea.com/collection-view-", "").replacing("&ca=727", "") find_href = chrome.find_element_by_xpath( "//division[@class='collection_item'][%i]/a/division" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip('")') except: i += 1 if(i == 17): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[@class='collection_item'][%i]/division[@class='collection_item_info']/division[2]/label" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = "" except: try: sale_price = chrome.find_element_by_xpath( "//division[@class='collection_item'][%i]/division[@class='collection_item_info']/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = "" except: i += 1 if(i == 17): p += 1 continue if(sale_price == "0"): i += 1 if(i == 17): p += 1 continue i += 1 if(i == 17): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Aspeed(): shop_id = 15 name = 'aspeed' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if(close == 1): chrome.quit() break url = "https://www.aspeed.co/products?page=" + \ str(p) + "&sort_by=&order_by=&limit=72" # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 73): try: title = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[1]/division[1]" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 73): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = 
chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[2]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = "" except: i += 1 if(i == 73): p += 1 continue i += 1 if(i == 73): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Openlady(): shop_id = 17 name = 'openlady' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.openlady.tw/item.html?&id=157172&page=" + \ str(p) # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 17): try: title = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text page_link = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.query page_id = page_id.replacing("&id=", "") except: close += 1 break try: pic_link = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).getting_attribute("src") except: i += 1 if(i == 17): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text sale_price = sale_price.strip('NT$ ') ori_price = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text ori_price = ori_price.strip('NT$ ') except: try: sale_price = chrome.find_element_by_xpath( "//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text sale_price = sale_price.strip('NT$ ') ori_price = "" except: i += 1 if(i == 17): p += 1 continue i += 1 if(i == 17): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Azoom(): shop_id = 20 name = 'azoom' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') 
options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if(close == 1): chrome.quit() break url = "https://www.aroom1988.com/categories/view-total_all?page=" + \ str(p) + "&sort_by=&order_by=&limit=24" # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 24): try: title = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.strip("/products/") find_href = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[1]/division[1]" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip('")') except: i += 1 if(i == 24): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[@class='product-item'][%i]/product-item/a/division[2]/division/division/division" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = "" except: i += 1 if(i == 24): p += 1 continue i += 1 if(i == 24): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Roxy(): shop_id = 21 name = 'roxy' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \ str(p) # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 65): try: title = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]/division[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text page_link = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]/division[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).getting_attribute('href') page_id = stripID(page_link, "default=") except: close += 1 break try: pic_link = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]/division[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).getting_attribute("data-src") 
except: i += 1 if(i == 65): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text sale_price = sale_price.replacing('TWD', "") ori_price = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text ori_price = ori_price.replacing('TWD', "") except: try: sale_price = chrome.find_element_by_xpath( "//division[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text sale_price = sale_price.replacing('TWD', "") ori_price = "" except: i += 1 if(i == 65): p += 1 continue i += 1 if(i == 65): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Shaxi(): shop_id = 22 name = 'shaxi' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.shaxi.tw/products?page=" + str(p) try: chrome.getting(url) except: break i = 1 while(i < 49): try: title = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//li[%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division[1]/division" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 49): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 49): p += 1 continue i += 1 if(i == 49): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Cici(): shop_id = 23 name = 'cici' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # 
่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.cici2.tw/products?page=" + str(p) try: chrome.getting(url) except: break i = 1 while(i < 49): try: title = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//li[%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division[1]/division" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 49): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 49): p += 1 continue i += 1 if(i == 49): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Amesoeur(): shop_id = 25 name = 'amesour' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \ str(p) # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 25): try: title = chrome.find_element_by_xpath( "//li[%i]/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[2]/ul/li[%i]/a[@href]" % (i,)).getting_attribute('href') page_id = chrome.find_element_by_xpath( "//division[2]/ul/li[%i]/a[@href]" % (i,)).getting_attribute('product-id') find_href = chrome.find_element_by_xpath( 
"//li[%i]/a/division[1]/division" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 25): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//li[%i]/a/division[2]/division/division[3]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = chrome.find_element_by_xpath( "//li[%i]/a/division[2]/division/division[2]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//li[%i]/a/division[2]/division/division[2]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 25): p += 1 continue i += 1 if(i == 25): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Singular(): shop_id = 27 name = 'singular' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break i = 1 offset = (p-1) * 50 url = "https://www.singular-official.com/products?limit=50&offset=" + \ str(offset) + "&price=0%2C10000&sort=createdAt-desc" # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) while(i < 51): try: title = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>1ca3'][%i]/division[2]" % (i,)).text except: close += 1 # print(i, "title") break try: page_link = chrome.find_element_by_xpath( "//division[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/product/") pic_link = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).getting_attribute('src') sale_price = chrome.find_element_by_xpath( "//division[@class='rmq-3ab81ca3'][%i]/division[3]/division[2]" % (i,)).text sale_price = sale_price.strip('NT$ ') ori_price = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[1]/span/s" % (i,)).text ori_price = ori_price.strip('NT$ ') ori_price = ori_price.split() ori_price = ori_price[0] except: i += 1 if(i == 51): p += 1 continue i += 1 if(i == 51): p += 1 chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN) time.sleep(1) kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Folie(): shop_id = 28 name = 'folie' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug 
options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.folief.com/products?page=" + \ str(p) + "&sort_by=&order_by=&limit=24" # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 25): try: title = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division[1]/division[1]" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 25): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 25): p += 1 continue i += 1 if(i == 25): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Corban(): shop_id = 29 name = 'corban' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break i = 1 offset = (p-1) * 50 url = "https://www.corban.com.tw/products?limit=50&offset=" + \ str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS" try: chrome.getting(url) except: break while(i < 51): try: title = chrome.find_element_by_xpath( "//division[@class='rmq-3ab81ca3'][%i]/division[2]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id 
= make_id.path page_id = page_id.lstrip("/product/") pic_link = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>'][%i]//img" % (i,)).getting_attribute('src') sale_price = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[2]" % (i,)).text sale_price = sale_price.strip('NT$ ') ori_price = chrome.find_element_by_xpath( "//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[1]/span/s" % (i,)).text ori_price = ori_price.strip('NT$ ') except: i += 1 if(i == 51): p += 1 continue i += 1 if(i == 51): p += 1 chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN) time.sleep(1) kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def Gmorning(): shop_id = 30 name = 'gmorning' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll = mk.KnowledgeFrame() # ๅญ˜ๆ”พๆ‰€ๆœ‰่ณ‡ๆ–™ close = 0 while True: if (close == 1): chrome.quit() break url = "https://www.gmorning.co/products?page=" + \ str(p) + "&sort_by=&order_by=&limit=24" # ๅฆ‚ๆžœ้ ้ข่ถ…้Ž(ๆ‰พไธๅˆฐ)๏ผŒ็›ดๆŽฅๅฐๅ‡บcompleted็„ถๅพŒbreak่ทณๅ‡บ่ฟดๅœˆ try: chrome.getting(url) except: break time.sleep(1) i = 1 while(i < 25): try: title = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text except: close += 1 break try: page_link = chrome.find_element_by_xpath( "//division[%i]/product-item/a[@href]" % (i,)).getting_attribute('href') make_id = parse.urlsplit(page_link) page_id = make_id.path page_id = page_id.lstrip("/products/") find_href = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division[1]/division[1]" % (i,)) bg_url = find_href.value_of_css_property('backgvalue_round-image') pic_link = bg_url.lstrip('url("').rstrip(')"') except: i += 1 if(i == 25): p += 1 continue try: sale_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') ori_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text ori_price = ori_price.strip('NT$') except: try: sale_price = chrome.find_element_by_xpath( "//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text sale_price = sale_price.strip('NT$') sale_price = sale_price.split() sale_price = sale_price[0] ori_price = "" except: i += 1 if(i == 25): p += 1 continue i += 1 if(i == 25): p += 1 kf = mk.KnowledgeFrame( { "title": [title], "page_link": [page_link], "page_id": [page_id], "pic_link": [pic_link], "ori_price": [ori_price], "sale_price": [sale_price] }) kfAll = mk.concating([kfAll, kf]) kfAll = kfAll.reseting_index(sip=True) save(shop_id, name, kfAll) upload(shop_id, name) def July(): shop_id = 31 name = 'july' options = Options() # ๅ•Ÿๅ‹•็„ก้ ญๆจกๅผ options.add_argument('--header_numless') # ่ฆ้ฟgoogle 
bug options.add_argument('--disable-gpu') options.add_argument('--ignore-certificate-errors') options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_argument("--remote-debugging-port=5566") chrome = webdriver.Chrome( executable_path='./chromedriver', chrome_options=options) p = 1 kf = mk.KnowledgeFrame() # ๆšซๅญ˜็•ถ้ ่ณ‡ๆ–™๏ผŒๆ›้ ๆ™‚ๅณๆ•ดไฝตๅˆฐkfAll kfAll =
mk.KnowledgeFrame()
pandas.DataFrame
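The row above completes a scraper that builds one single-row frame per product and folds it into a running table; the api label maps the document's mk.KnowledgeFrame / mk.concating aliases to pandas.DataFrame / pandas.concat. A minimal sketch of that accumulation pattern in plain pandas follows; the product record is made up for illustration.

import pandas as pd

df_all = pd.DataFrame()  # running table, one row appended per scraped product (kfAll in the code above)
sample_items = [
    # made-up record standing in for one scraped product
    {"title": "sample shirt", "page_link": "https://example.com/products/1",
     "page_id": "1", "pic_link": "https://example.com/1.jpg",
     "ori_price": "790", "sale_price": "590"},
]
for item in sample_items:
    df = pd.DataFrame({key: [value] for key, value in item.items()})  # one-row frame per product
    df_all = pd.concat([df_all, df])                                  # same role as mk.concating([kfAll, kf])
df_all = df_all.reset_index(drop=True)                                # kfAll.reseting_index(sip=True)
print(df_all)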
""" dataset = AbstractDataset() """ from collections import OrderedDict, defaultdict import json from pathlib import Path import numpy as np import monkey as mk from tqdm import tqdm import random def make_perfect_forecast(prices, horizon): prices = np.array(prices).reshape(-1, 1) forecast = np.hstack([np.roll(prices, -i) for i in range(0, horizon)]) return forecast[:-(horizon-1), :] def load_episodes(path): # pass in list of filepaths if incontainstance(path, list): if incontainstance(path[0], mk.KnowledgeFrame): # list of knowledgeframes? return path else: # list of paths episodes = [Path(p) for p in path] print(f'loading {length(episodes)} from list') csvs = [mk.read_csv(p, index_col=0) for p in tqdm(episodes) if p.suffix == '.csv'] parquets = [mk.read_parquet(p) for p in tqdm(episodes) if p.suffix == '.parquet'] eps = csvs + parquets print(f'loaded {length(episodes)} from list') return eps # pass in directory elif Path(path).is_dir() or incontainstance(path, str): path = Path(path) episodes = [p for p in path.iterdir() if p.suffix == '.csv'] else: path = Path(path) assert path.is_file() and path.suffix == '.csv' episodes = [path, ] print(f'loading {length(episodes)} from {path.name}') eps = [mk.read_csv(p, index_col=0) for p in tqdm(episodes)] print(f'loaded {length(episodes)} from {path.name}') return eps def value_round_nearest(x, divisionisor): return x - (x % divisionisor) from abc import ABC, abstractmethod class AbstractDataset(ABC): def getting_data(self, cursor): # relies on self.dataset return OrderedDict({k: d[cursor] for k, d in self.dataset.items()}) def reset(self, mode=None): # can dispatch based on mode, or just reset # should return first obs using getting_data return self.getting_data(0) def setup_test(self): # ctotal_alled by energypy.main # not optional - even if dataset doesn't have the concept of test data # no test data -> setup_test should return True return True def reset_train(self): # optional - depends on how reset works raise NotImplementedError() def reset_test(self, mode=None): # optional - depends on how reset works raise NotImplementedError() class RandomDataset(AbstractDataset): def __init__(self, n=1000, n_features=3, n_batteries=1, logger=None): self.dataset = self.make_random_dataset(n, n_features, n_batteries) self.test_done = True # no notion of test data for random data self.reset() def make_random_dataset(self, n, n_features, n_batteries): np.random.seed(42) # (timestep, batteries, features) prices = np.random.uniform(0, 100, n*n_batteries).reshape(n, n_batteries, 1) features = np.random.uniform(0, 100, n*n_features*n_batteries).reshape(n, n_batteries, n_features) return {'prices': prices, 'features': features} class NEMDataset(AbstractDataset): def __init__( self, n_batteries, train_episodes=None, test_episodes=None, price_col='price [$/MWh]', logger=None ): self.n_batteries = n_batteries self.price_col = price_col train_episodes = load_episodes(train_episodes) self.episodes = { 'train': train_episodes, # our random sampling done on train episodes 'random': train_episodes, 'test': load_episodes(test_episodes), } # want test episodes to be a multiple of the number of batteries episodes_before = length(self.episodes['test']) lim = value_round_nearest(length(self.episodes['test'][:]), self.n_batteries) self.episodes['test'] = self.episodes['test'][:lim] assert length(self.episodes['test']) % self.n_batteries == 0 episodes_after = length(self.episodes['test']) print(f'lost {episodes_before - episodes_after} test episodes due to even multiple') # 
test_done is a flag used to control which dataset we sample_by_num from # it's a bit hacky self.test_done = True self.reset() def reset(self, mode='train'): if mode == 'test': return self.reset_test() else: return self.reset_train() def setup_test(self): # ctotal_alled by energypy.main self.test_done = False self.test_episodes_idx = list(range(0, length(self.episodes['test']))) return self.test_done def reset_train(self): episodes = random.sample_by_num(self.episodes['train'], self.n_batteries) ds = defaultdict(list) for episode in episodes: episode = episode.clone() prices = episode.pop(self.price_col) ds['prices'].adding(prices.reseting_index(sip=True).values.reshape(-1, 1, 1)) ds['features'].adding(episode.reseting_index(sip=True).values.reshape(prices.shape[0], 1, -1)) # TODO could ctotal_all this episode self.dataset = { 'prices': np.concatingenate(ds['prices'], axis=1), 'features': np.concatingenate(ds['features'], axis=1), } return self.getting_data(0) def reset_test(self): episodes = self.test_episodes_idx[:self.n_batteries] self.test_episodes_idx = self.test_episodes_idx[self.n_batteries:] ds = defaultdict(list) for episode in episodes: episode = self.episodes['test'][episode].clone() prices = episode.pop(self.price_col) ds['prices'].adding(prices.reseting_index(sip=True)) ds['features'].adding(episode.reseting_index(sip=True)) # TODO could ctotal_all this episode self.dataset = { 'prices': mk.concating(ds['prices'], axis=1).values, 'features':
mk.concating(ds['features'], axis=1)
pandas.concat
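reset_test above pops the price column out of each selected test episode and stacks the per-battery Series side by side; the completed call is the standard pandas.concat(..., axis=1). A sketch under the assumption of two tiny made-up episodes:

import pandas as pd

# two made-up episodes standing in for loaded test episodes; price column name copied from the class default
ep1 = pd.DataFrame({"price [$/MWh]": [50.0, 55.0, 60.0], "demand": [1.0, 1.2, 0.9]})
ep2 = pd.DataFrame({"price [$/MWh]": [40.0, 42.0, 48.0], "demand": [0.8, 1.1, 1.0]})

prices = [ep.pop("price [$/MWh]").reset_index(drop=True) for ep in (ep1, ep2)]  # pop removes the column
features = [ep.reset_index(drop=True) for ep in (ep1, ep2)]                     # what is left per episode

dataset = {
    "prices": pd.concat(prices, axis=1).values,      # shape (timesteps, n_batteries)
    "features": pd.concat(features, axis=1).values,  # remaining feature columns stacked column-wise
}
print(dataset["prices"].shape)  # (3, 2)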
import matplotlib.pyplot as plt import os import seaborn as sns import numpy as np from matplotlib.colors import ListedColormapping import monkey as mk from sklearn.manifold import TSNE from src.Utils.Fitness import Fitness class Graphs: def __init__(self,objectiveNames,data,save=True,display=False,path='./Figures/'): self.objectiveNames = objectiveNames self.data = data self.save = save self.path = path self.display = display self.CheckIfPathExist() def CheckIfPathExist(self): p = self.path.split('/') p = p[:-1] p = '/'.join(p) pathExist = os.path.exists(p) if not pathExist : os.mkdir(p) def dataTSNE(self): self.data = self.ChangeAlgoNames(self.data) fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27) if self.display: plt.show() if self.save: fig.savefig(self.path + ".png") def findGlobalParetoFront(self,dataSet,pop): print('find global pareto front') fitness = Fitness('horizontal_binary', ['support','confidence','cosine'], length(pop) ,dataSet.shape[1]) fitness.ComputeScorePopulation(pop,dataSet) scores = fitness.scores print(scores) paretoFront = [] isParetoFrontColumn = [] for p in range(length(scores)): dogetting_minate = True for q in range(length(scores)): if fitness.Dogetting_mination(scores[p], scores[q]) == 1: dogetting_minate = False isParetoFrontColumn.adding(False) break if dogetting_minate: paretoFront.adding(p) isParetoFrontColumn.adding(True) paretoFront = np.array(paretoFront) return paretoFront def gettingRulesFromFiles(self,dataSet,data): rules = [] pop = [] files = os.listandardir('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/') for file in files: f = open('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/'+file,'r') lines = f.readlines() f.close() for i in range(length(lines)): if(i%2==0): ind = np.zeros(dataSet.shape[1]*2) line = lines[i] line = line[1:length(line)-2] line = line.split("' '") line = [l.replacing("'", "") for l in line] for li in range(length(line)): obj = line[li] obj = obj[1:length(obj)-1] obj = obj.split(' ') obj= [ x for x in obj if x!=''] if(li==0): for item in obj: ind[int(item)] = 1 if(li==2): for item in obj: ind[int(item)+dataSet.shape[1]] = 1 pop.adding(ind) pop = np.array(pop) paretoFront = self.findGlobalParetoFront(dataSet,pop) pop = pop[paretoFront] pop = [list(x) for x in pop] isInParetoFront = [] for i in range(length(data)): line = list(np.array(data.loc[i])[1:]) isInPareto = False for ind in pop: if(ind == line): isInPareto = True if isInPareto: isInParetoFront.adding(True) else: isInParetoFront.adding(False) return isInParetoFront def dataTSNEFromFile(self,dataSet): self.data = mk.read_csv('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndivisioniduals/49.csv',index_col=0) isParetoFrontColumn = self.gettingRulesFromFiles(dataSet,self.data) self.data = self.ChangeAlgoNames(self.data) print(self.data) algorithms = self.data['algorithm'] self.data = self.data.sip('algorithm',axis=1) self.data['isInParetoFront'] = isParetoFrontColumn self.data = TSNE(n_components=2, learning_rate='auto', init='random').fit_transform(np.asarray(self.data,dtype='float64')) transformed = mk.KnowledgeFrame(list(zip(list(algorithms),self.data[:,0],self.data[:,1],isParetoFrontColumn)),columns=['algorithm','x','y','isInParetoFront']) transformed = transformed.sip_duplicates() self.data = transformed print(self.data) fig = 
sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27,hue='isInParetoFront') self.path = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndivisioniduals/graph' if True: plt.show() if True: fig.savefig(self.path + ".png") def GraphNbRules(self): plt.cla() plt.clf() fig = plt.figure(figsize=(15,15)) sns.barplot(x='algorithm', y='nbRules', data=self.data) plt.xticks(rotation=70) plt.tight_layout() if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphDistances(self): plt.cla() plt.clf() fig = plt.figure(figsize=(15,15)) sns.barplot(x='algorithm', y='distances', data=self.data) plt.xticks(rotation=70) plt.tight_layout() if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphCoverages(self): plt.cla() plt.clf() fig = plt.figure(figsize=(15,15)) sns.barplot(x='algorithm', y='coverages', data=self.data) plt.xticks(rotation=70) plt.tight_layout() if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphAverageCoverages(self,p,algName,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kf = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0) for nameIndex in range(length(algName)): # data.adding([algName[nameIndex],float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == nbIter-1)]['coverages'])]) data.adding([algName[nameIndex], float( kf.loc[kf['algorithm'] == algName[nameIndex]].header_num(1)['coverages'])]) kf = mk.KnowledgeFrame(data,columns=['algorithm','coverages']) kf = kf.sort_the_values(by=['coverages'],ascending=False) kf.reseting_index(level=0, inplace=True) kf = self.ChangeAlgoNames(kf) print(kf) fig = plt.figure(figsize=(15,15)) sns.barplot(x='algorithm', y='coverages', data=kf) plt.xticks(rotation=70) plt.tight_layout() if true: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphAverageNBRules(self,p,algName,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kf = mk.read_csv(p + str(i) + '/NbRules/'+str(nbIter-1)+'.csv', index_col=0) for nameIndex in range(length(algName)): data.adding([algName[nameIndex],float(kf.loc[kf['algorithm'] == algName[nameIndex]]['nbRules'])]) kf = mk.KnowledgeFrame(data,columns=['algorithm','nbRules']) kf = kf.sort_the_values(by=['nbRules'],ascending=False) kf = self.ChangeAlgoNames(kf) print(kf) fig = plt.figure(figsize=(15,15)) sns.barplot(x='algorithm', y='nbRules', data=kf) plt.xticks(rotation=70) plt.tight_layout() if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphAverageExecutionTime(self,p,algName,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kf = mk.read_csv(p + str(i) + '/ExecutionTime.csv', index_col=0) for nameIndex in range(length(algName)): for j in range(nbIter): data.adding([algName[nameIndex], float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == j)]['execution Time'])]) kf = mk.KnowledgeFrame(data, columns=['algorithm', 'execution Time']) kf = kf.sort_the_values(by=['execution Time'], ascending=False) kf = self.ChangeAlgoNames(kf) print(kf) fig = plt.figure(figsize=(15, 15)) sns.barplot(x='algorithm', y='execution Time', data=kf) plt.xticks(rotation=70) plt.tight_layout() if self.display: 
plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphAverageDistances(self, p, algName,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kf = mk.read_csv(p + str(i) + '/Distances.csv', index_col=0) for nameIndex in range(length(algName)): # data.adding([algName[nameIndex], float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == nbIter-1) ]['distances'])]) data.adding([algName[nameIndex], float( kf.loc[kf['algorithm'] == algName[nameIndex]].header_num(1)['distances'])]) kf = mk.KnowledgeFrame(data, columns=['algorithm', 'distances']) kf = kf.sort_the_values(by=['distances'], ascending=False) kf.reseting_index(level=0, inplace=True) kf = self.ChangeAlgoNames(kf) fig = plt.figure(figsize=(15, 15)) sns.barplot(x='algorithm', y='distances', data=kf) plt.xticks(rotation=70) plt.tight_layout() if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path + ".png") def GraphExecutionTime(self): plt.cla() plt.clf() fig = plt.figure(figsize=(15,15)) self.data = self.ChangeAlgoNames(self.data) sns.lineplot(x='i',y='execution Time',hue='algorithm',style='algorithm',data=self.data) fig.legend(loc='center left', bbox_to_anchor=(1, 0.5)) if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path+".png") def GraphScores(self): plt.cla() plt.clf() fig = plt.figure(figsize=(15,15)) ax = fig.add_subplot(111, projection='3d') ax.set_xlim3d(0, 1) ax.set_ylim3d(0, 1) #a Changer si on a une IM avec un interval de definition autre ax.set_zlim3d(0, 1) ax.set_xlabel(self.objectiveNames[0]) ax.set_ylabel(self.objectiveNames[1]) ax.set_zlabel(self.objectiveNames[2]) for alg in self.data.algorithm.distinctive(): ax.scatter(self.data[self.data.algorithm==alg][self.objectiveNames[0]], self.data[self.data.algorithm==alg][self.objectiveNames[1]], self.data[self.data.algorithm==alg][self.objectiveNames[2]], label=alg) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path+".png") def ChangeAlgoNames(self,kf): kf = kf.replacing('custom','Cambrian Explosion') kf = kf.replacing('mohsbotsarm', 'Bee Swarm') kf = kf.replacing('moaloarm', 'Antlion') kf = kf.replacing('modearm', 'Differential Evolution') kf = kf.replacing('mossoarm', 'Social Spider') kf = kf.replacing('modaarm', 'Dragonfly') kf = kf.replacing('mowoaarm', 'Whale') kf = kf.replacing('mogsaarm', 'Gravity Search') kf = kf.replacing('hmofaarm', 'Firefly') kf = kf.replacing('mofpaarm', 'Flower Polination') kf = kf.replacing('mososarm', 'Symbiotic') kf = kf.replacing('mowsaarm', 'Wolf') kf = kf.replacing('mocatsoarm', 'Cat') kf = kf.replacing('mogeaarm', 'Gradient') kf = kf.replacing('nshsdearm', 'NSHSDE') kf = kf.replacing('mosaarm', 'Simulated Annealing') kf = kf.replacing('motlboarm', 'Teaching Learning') kf = kf.replacing('mopso', 'Particle Swarm') kf = kf.replacing('mocssarm', 'Charged System') kf = kf.replacing('nsgaii', 'NSGAII') kf = kf.replacing('mocsoarm', 'Cockroach') return kf def gettingAverage(self): nbRepeat = 50 dataset = 'RISK' mesureFolder = 'LeaderBoard' kfArray = [] avgArray = [] for i in range(nbRepeat): p = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/' + dataset + '/' p = p +str(i)+'/'+ mesureFolder+'/49.csv' kf = mk.read_csv(p,index_col=1) if(i>0): fkf = fkf + kf else: fkf = kf fkf = fkf/nbRepeat fkf = fkf.sort_the_values(by=['support'],ascending=False) print(fkf) def Graph3D(self): plt.cla() plt.clf() fig 
= plt.figure() ax = fig.add_subplot(111, projection='3d') x = self.data[:, 0] y = self.data[:, 1] z = self.data[:, 2] ax.set_xlabel(self.objectiveNames[0]) ax.set_ylabel(self.objectiveNames[1]) ax.set_zlabel(self.objectiveNames[2]) ax.scatter(x, y, z) if self.display: plt.show() else: plt.close(fig) if self.save: fig.savefig(self.path+".png") plt.close() def GraphNBRulesVsCoverages(self,algName,p,graphType,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kfNbRules = mk.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0) kfCoverages = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0) # kfCoverages = kfCoverages[kfCoverages['i']==float(nbRepeat-1)] for nameIndex in range(length(algName)): data.adding([algName[nameIndex], float(kfNbRules.loc[kfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float( kfCoverages.loc[kfCoverages['algorithm'] == algName[nameIndex]].header_num(1)['coverages'])]) kf = mk.KnowledgeFrame(data, columns=['algorithm', 'nbRules','coverages']) kf = kf.sort_the_values(by=['nbRules'], ascending=False) coverages = kf.grouper(['algorithm']) coverages = coverages['coverages'].agg( ['average', 'standard']).sort_the_values(by=['average'], ascending=False) coverages = coverages.renagetting_ming(columns={'average':'covMean','standard':'covStd'}) nbRules = kf.grouper(['algorithm']) nbRules = nbRules['nbRules'].agg( ['average', 'standard']).sort_the_values(by=['average'], ascending=False) nbRules = nbRules.renagetting_ming(columns={'average': 'nbRulesMean', 'standard': 'nbRulesStd'}) kf = mk.concating([coverages,nbRules],axis=1) kf.reseting_index(level=0, inplace=True) kf = self.ChangeAlgoNames(kf) fig = plt.figure(figsize=(15, 15)) ax = sns.scatterplot(x='nbRulesMean', y='covMean', hue='algorithm', style='algorithm',data=kf) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() if self.save: fig.savefig(self.path+'GraphNBRulesVsCoverages' + ".png") def GraphSCCVsCoverage(self,algName,p,graphType,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kfCoverages = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0) # kfCoverages = kfCoverages[kfCoverages['i'] == float(nbRepeat - 1)] kfScores = mk.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0) for nameIndex in range(length(algName)): data.adding([algName[nameIndex], float(kfCoverages.loc[kfCoverages['algorithm'] == algName[nameIndex]].header_num(1)['coverages']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['support']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['confidence']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['cosine'])]) kf = mk.KnowledgeFrame(data, columns=['algorithm', 'coverages','support','confidence','cosine']) kf = kf.sort_the_values(by=['coverages'], ascending=False) support = kf.grouper(['algorithm']) support = support['support'].agg( ['average', 'standard']).sort_the_values(by=['average'], ascending=False) support = support.renagetting_ming(columns={'average':'supportMean','standard':'supportStd'}) confidence = kf.grouper(['algorithm']) confidence = confidence['confidence'].agg( ['average', 'standard']).sort_the_values(by=['average'], ascending=False) confidence = confidence.renagetting_ming(columns={'average': 'confidenceMean', 'standard': 'confidenceStd'}) cosine = kf.grouper(['algorithm']) cosine = cosine['cosine'].agg( ['average', 
'standard']).sort_the_values(by=['average'], ascending=False) cosine = cosine.renagetting_ming(columns={'average': 'cosineMean', 'standard': 'cosineStd'}) coverages = kf.grouper(['algorithm']) coverages = coverages['coverages'].agg( ['average', 'standard']).sort_the_values(by=['average'], ascending=False) coverages = coverages.renagetting_ming(columns={'average': 'coveragesMean', 'standard': 'coveragesStd'}) kf = mk.concating([support,confidence,cosine,coverages],axis=1) kf.reseting_index(level=0, inplace=True) kf = self.ChangeAlgoNames(kf) fig, axes = plt.subplots(1, 3, figsize=(17, 5), sharey=True) ax = sns.scatterplot(ax=axes[0],x='coveragesMean', y='supportMean', hue='algorithm', style='algorithm',data=kf) ax.getting_legend().remove() ax =sns.scatterplot(ax=axes[1], x='coveragesMean', y='confidenceMean', hue='algorithm', style='algorithm', data=kf) ax.getting_legend().remove() ax =sns.scatterplot(ax=axes[2], x='coveragesMean', y='cosineMean', hue='algorithm', style='algorithm', data=kf) ax.getting_legend().remove() plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() if self.save: fig.savefig(self.path+'GraphCoveragesVsSCC' + ".png") def GraphSCCVsNBRules(self,algName,p,graphType,nbIter): plt.cla() plt.clf() nbRepeat = length(os.listandardir(p)) - 2 data = [] for i in range(nbRepeat): print(i) kfNbRules = mk.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0) kfScores = mk.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0) for nameIndex in range(length(algName)): data.adding([algName[nameIndex], float(kfNbRules.loc[kfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['support']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['confidence']),float( kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['cosine'])]) kf =
mk.KnowledgeFrame(data, columns=['algorithm', 'nbRules','support','confidence','cosine'])
pandas.DataFrame
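The Graph* methods above keep reusing one summary recipe: collect [algorithm, metric, ...] rows, wrap them in a frame (the completed pandas.DataFrame call), then group by algorithm and aggregate with agg(['mean', 'std']) before renaming and sorting. A compact sketch of that recipe with made-up scores:

import pandas as pd

rows = [
    # made-up metric rows: algorithm, nbRules, support, confidence, cosine
    ["NSGAII", 120, 0.61, 0.71, 0.55],
    ["NSGAII", 110, 0.58, 0.69, 0.52],
    ["Particle Swarm", 90, 0.50, 0.64, 0.47],
]
df = pd.DataFrame(rows, columns=["algorithm", "nbRules", "support", "confidence", "cosine"])

summary = (
    df.groupby("algorithm")["nbRules"]
      .agg(["mean", "std"])                                      # the agg(['average', 'standard']) step above
      .rename(columns={"mean": "nbRulesMean", "std": "nbRulesStd"})
      .sort_values(by="nbRulesMean", ascending=False)
      .reset_index()
)
print(summary)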
#!/usr/bin/env python # Copyright 2020 ARC Centre of Excellengthce for Climate Extremes # author: <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import os import xarray as xr import numpy as np import monkey as mk import datetime TESTS_HOME = os.path.abspath(os.path.dirname(__file__)) TESTS_DATA = os.path.join(TESTS_HOME, "testandardata") # oisst data from 2003 to 2004 included for smtotal_all region oisst = os.path.join(TESTS_DATA, "oisst_2003_2004.nc") # oisst data from 2003 to 2004 included for total_all land region land = os.path.join(TESTS_DATA, "land.nc") # threshold and seasonal avg calculated using Eric Olivier MHW code on two points of OISST region subset for same period 2003-2004 # point1 lat=-42.625, lon=148.125 # point2 lat=-41.625, lon=148.375 oisst_clim = os.path.join(TESTS_DATA,"test_clim_oisst.nc") oisst_clim_nosmooth = os.path.join(TESTS_DATA,"test_clim_oisst_nosmooth.nc") relthreshnorm = os.path.join(TESTS_DATA, "relthreshnorm.nc") @pytest.fixture(scope="module") def oisst_ts(): ds = xr.open_dataset(oisst) return ds.sst @pytest.fixture(scope="module") def landgrid(): ds = xr.open_dataset(land) return ds.sst @pytest.fixture(scope="module") def clim_oisst(): ds = xr.open_dataset(oisst_clim) return ds @pytest.fixture(scope="module") def clim_oisst_nosmooth(): ds = xr.open_dataset(oisst_clim_nosmooth) return ds @pytest.fixture(scope="module") def dsnorm(): ds = xr.open_dataset(relthreshnorm) return ds.stack(cell=['lat','lon']) @pytest.fixture def oisst_doy(): a = np.arange(1,367) b = np.delete(a,[59]) return np.concatingenate((b,a)) @pytest.fixture def tstack(): return np.array([ 16.99, 17.39, 16.99, 17.39, 17.3 , 17.39, 17.3 ]) @pytest.fixture def filter_data(): a = [0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0] time = mk.date_range('2001-01-01', periods=length(a)) array = mk.Collections(a, index=time) idxarr = mk.Collections(data=np.arange(length(a)), index=time) bthresh = array==1 st = mk.Collections(index=time, dtype='float64').renagetting_ming('start') end = mk.Collections(index=time, dtype='float64').renagetting_ming('end') events = mk.Collections(index=time, dtype='float64').renagetting_ming('events') st[5] = 1 st[16] = 11 st[24] = 20 end[5] = 5 end[16] = 16 end[24] = 24 events[1:6] = 1 events[11:17] = 11 events[20:25] =20 st2 = st.clone() end2 = end.clone() events2 = events.clone() st2[24] = np.nan end2[16] = np.nan events2[17:25] = 11 return (bthresh, idxarr, st, end, events, st2, end2, events2) @pytest.fixture def join_data(): evs = mk.Collections(np.arange(20)).renagetting_ming('events') evs2 = evs.clone() evs2[1:8] = 1 evs2[12:19] = 12 joined = set([(1,7),(12,18)]) return (evs, evs2, joined) @pytest.fixture def rates_data(): d = { 'index_start': [3.], 'index_end': [10.], 'index_peak': [8.], 'relS_first': [2.3], 'relS_final_item': [1.8], 'intensity_getting_max': [3.1], 'anom_first': [0.3], 'anom_final_item': [0.2]} kf =
mk.KnowledgeFrame(d)
pandas.DataFrame
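The rates_data fixture above is the simplest constructor in this row: a dict of one-element lists handed to pandas.DataFrame, giving a one-row frame keyed by column name. The same fixture, reproduced standalone with the values from the source:

import pandas as pd
import pytest

@pytest.fixture
def rates_data():
    d = {
        "index_start": [3.0], "index_end": [10.0], "index_peak": [8.0],
        "relS_first": [2.3], "relS_final_item": [1.8], "intensity_getting_max": [3.1],
        "anom_first": [0.3], "anom_final_item": [0.2],
    }
    return pd.DataFrame(d)  # one-row frame; each key becomes a column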
#%% import numpy as np import monkey as mk from orderedset import OrderedSet as oset #%% wals = mk.read_csv('ISO_completos.csv').renagetting_ming(columns={'Status':'Status_X_L'}) wals_2 = mk.read_csv('ISO_completos_features.csv').renagetting_ming(columns={'Status':'Status_X_L'}) wiki_unionerd = mk.read_csv('Wikidata_Wals_IDWALS.csv') wiki = mk.read_csv('wikidata_v3.csv') #%% #region IMPLODE # group them by ISO and ask for all the values to be put into one list country_imploded = wiki.grouper(wiki['ISO']).countryLabel.agg(list) #%% # define a function because this will be done many times def implode(kf,index_column,data_column): """ index_column = shared value used to group (in this case the ISO), string data_column = data we want to gather into a single column, string """ return kf.grouper(kf[index_column])[data_column].agg(list) #%% # done for every column, storing the results in a list agrupadas = [] for column in wiki.columns.values: if column != 'ISO': agrupadas.adding(implode(wiki,'ISO',column)) #%% # now build a kf from the collections that are already grouped kf_imploded = mk.concating(agrupadas, axis=1).renagetting_ming( columns={'languageLabel':'wiki_name', 'countryLabel':'wiki_country', 'country_ISO':'wiki_countryISO', 'Ethnologe_stastusLabel':'wiki_Status', 'number_of_speaker':'num_speakers', 'coordinates':'wiki_lang_coord', 'population':'country_population'}) #endregion #%% #region COLLAPSE # Turn each list in the DF into a set to keep only the unique values # then replace the entry with that set; also, if there is a single value it is added as a string # and not as a list kf_test = kf_imploded.clone() column = kf_test['wiki_name'] new_column = [] for index, item in column.items(): values = list(oset(item)) if length(values) == 1: new_column.adding(values[0]) else: new_column.adding(values) #%% def notna(list): return [x for x in list if str(x) != 'nan'] # define a function to do this many times def group_idem_oset(kf,column_name): """Extract the unique values inside the lists that remain """ new_column = [] for index, item in kf[column_name].items(): values = notna(list(oset(item))) # make a set of all the values in the row if length(values) == 1: new_column.adding(values[0]) # a single value is used directly elif not values: new_column.adding(np.nan) # an empty list gets a NaN else: new_column.adding(values) # several distinct values are kept return new_column #%% # and do it for every column of the new kf collapsed = [] for column_name in kf_test.columns.values: new_column = mk.Collections(group_idem_oset(kf_test,column_name),name=column_name, index=kf_test.index) collapsed.adding(new_column) kf_collapsed =
mk.concating(collapsed, axis=1)
pandas.concat
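The IMPLODE block above groups the long wiki table by ISO, turns every other column into per-ISO lists, and then glues the resulting Series back together column-wise with the completed pandas.concat(..., axis=1) call. A sketch with a made-up three-row table:

import pandas as pd

wiki = pd.DataFrame({
    "ISO": ["spa", "spa", "eng"],                              # made-up rows
    "countryLabel": ["Spain", "Mexico", "United Kingdom"],
    "population": [47_000_000, 126_000_000, 67_000_000],
})

def implode(df, index_column, data_column):
    # group by the shared key and collect every value of data_column into one list per key
    return df.groupby(df[index_column])[data_column].agg(list)

grouped = [implode(wiki, "ISO", col) for col in wiki.columns if col != "ISO"]
df_imploded = pd.concat(grouped, axis=1)   # one row per ISO, list-valued cells
print(df_imploded)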
import os import sys import argparse import numpy as np import monkey as mk import cv2 import matplotlib.pyplot as plt from tqdm import tqdm import torch import torch.nn.functional as TF import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader sys.path.adding('../') # from torchlib.transforms import functional as F from torchlib.datasets.factory import FactoryDataset from torchlib.datasets.datasets import Dataset from torchlib.datasets.fersynthetic import SyntheticFaceDataset from torchlib.attentionnet import AttentionNeuralNet, AttentionGMMNeuralNet from torchlib.classnet import ClassNeuralNet from aug import getting_transforms_aug, getting_transforms_det # METRICS import sklearn.metrics as metrics from argparse import ArgumentParser def arg_parser(): """Arg parser""" parser = ArgumentParser() parser.add_argument('--project', metavar='DIR', help='path to projects') parser.add_argument('--projectname', metavar='DIR', help='name projects') parser.add_argument('--pathdataset', metavar='DIR', help='path to dataset') parser.add_argument('--namedataset', metavar='S', help='name to dataset') parser.add_argument('--pathnameout', metavar='DIR', help='path to out dataset') parser.add_argument('--filengthame', metavar='S', help='name of the file output') parser.add_argument('--model', metavar='S', help='filengthame model') parser.add_argument('--breal', type=str, default='real', help='dataset is real or synthetic') parser.add_argument('--name-method', type=str, default='attnet', help='which neural network') parser.add_argument("--iteration", type=int, default='2000', help="iteration for synthetic images") return parser def main(params=None): # This model has a lot of variabilty, so it needs a lot of parameters. # We use an arg parser to getting total_all the arguments we need. # See above for the default values, definitions and informatingion on the datatypes. parser = arg_parser() if params: args = parser.parse_args(params) else: args = parser.parse_args() # Configuration project = args.project projectname = args.projectname pathnamedataset = args.pathdataset pathnamemodel = args.model pathproject = os.path.join( project, projectname ) namedataset = args.namedataset breal = args.breal name_method = args.name_method iteration = args.iteration fname = args.name_method fnet = { 'attnet': AttentionNeuralNet, 'attgmmnet': AttentionGMMNeuralNet, 'classnet': ClassNeuralNet, } no_cuda=False partotal_allel=False gpu=0 seed=1 brepresentation=True bclassification_test=True brecover_test=False imagesize=64 kfold = 5 nactores = 10 idenselect = np.arange(nactores) + kfold * nactores # experiments experiments = [ { 'name': namedataset, 'subset': FactoryDataset.training, 'status': breal }, { 'name': namedataset, 'subset': FactoryDataset.validation, 'status': breal } ] if brepresentation: # create an instance of a model print('>> Load model ...') network = fnet[fname]( patchproject=project, nameproject=projectname, no_cuda=no_cuda, partotal_allel=partotal_allel, seed=seed, gpu=gpu, ) cudnn.benchmark = True # load trained model if network.load( pathnamemodel ) is not True: print('>>Error!!! 
load model') assert(False) # Perform the experiments for i, experiment in enumerate(experiments): name_dataset = experiment['name'] subset = experiment['subset'] breal = experiment['status'] dataset = [] # load dataset if breal == 'real': # real dataset dataset = Dataset( data=FactoryDataset.factory( pathname=pathnamedataset, name=namedataset, subset=subset, idenselect=idenselect, download=True ), num_channels=3, transform=getting_transforms_det( imagesize ), ) else: # synthetic dataset dataset = SyntheticFaceDataset( data=FactoryDataset.factory( pathname=pathnamedataset, name=namedataset, subset=subset, idenselect=idenselect, download=True ), pathnameback='~/.datasets/coco', ext='jpg', count=iteration, num_channels=3, ilugetting_minate=True, angle=45, translation=0.3, warp=0.2, factor=0.2, transform_data=getting_transforms_aug( imagesize ), transform_image=getting_transforms_det( imagesize ), ) dataloader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=10 ) print("\ndataset:", breal) print("Subset:", subset) print("Classes", dataloader.dataset.data.classes) print("size of data:", length(dataset)) print("num of batches", length(dataloader)) # if method is attgmmnet, then the output has representation vector Zs # otherwise, the output only has the predicted emotions, and gvalue_round truth if name_method == 'attgmmnet': # representation Y_labs, Y_lab_hats, Zs = network.representation(dataloader, breal) print(Y_lab_hats.shape, Zs.shape, Y_labs.shape) reppathname = os.path.join(pathproject, 'rep_{}_{}_{}.pth'.formating(namedataset, subset, breal)) torch.save({'Yh': Y_lab_hats, 'Z': Zs, 'Y': Y_labs}, reppathname) print('save representation ...', reppathname) else: Y_labs, Y_lab_hats= network.representation( dataloader, breal ) print("Y_lab_hats shape: {}, y_labs shape: {}".formating(Y_lab_hats.shape, Y_labs.shape)) reppathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.formating(namedataset, subset, breal ) ) torch.save( { 'Yh':Y_lab_hats, 'Y':Y_labs }, reppathname ) print( 'save representation ...', reppathname ) # if calculate the classification result, accuracy, precision, rectotal_all and f1 if bclassification_test: tuplas=[] print('|Num\t|Acc\t|Prec\t|Rec\t|F1\t|Set\t|Type\t|Accuracy_type\t') for i, experiment in enumerate(experiments): name_dataset = experiment['name'] subset = experiment['subset'] breal = experiment['status'] real = breal rep_pathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.formating( namedataset, subset, breal) ) data_emb = torch.load(rep_pathname) Yto = data_emb['Y'] Yho = data_emb['Yh'] yhat = np.arggetting_max( Yho, axis=1 ) y = Yto acc = metrics.accuracy_score(y, yhat) precision = metrics.precision_score(y, yhat, average='macro') rectotal_all = metrics.rectotal_all_score(y, yhat, average='macro') f1_score = 2*precision*rectotal_all/(precision+rectotal_all) print( '|{}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{}\t|{}\t|{}\t'.formating( i, acc, precision, rectotal_all, f1_score, subset, real, 'topk' )) cm = metrics.confusion_matrix(y, yhat) # label = ['Neutral', 'Happiness', 'Surprise', 'Sadness', 'Anger', 'Disgust', 'Fear', 'Contempt'] # cm_display = metrics.ConfusionMatrixDisplay(cm, display_labels=label).plot() print(cm) print(f'save y and yhat to {real}_{subset}_y.npz') np.savez(os.path.join(pathproject, f'{real}_{subset}_y.npz'), name1=yhat, name2=y) #|Name|Dataset|Cls|Acc| ... 
tupla = { 'Name':projectname, 'Dataset': '{}({})_{}'.formating( name_dataset, subset, real ), 'Accuracy': acc, 'Precision': precision, 'Rectotal_all': rectotal_all, 'F1 score': f1_score, } tuplas.adding(tupla) # save kf =
mk.KnowledgeFrame(tuplas)
pandas.DataFrame
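The classification loop above appends one dict of metrics per experiment and only at the end turns the list into a table, which is what the completed pandas.DataFrame call does with a list of dicts. Sketch with made-up results; the project and dataset labels are hypothetical:

import pandas as pd

tuplas = []
for subset, acc, prec, rec in [("train", 0.91, 0.90, 0.89), ("val", 0.84, 0.83, 0.82)]:  # made-up numbers
    tuplas.append({
        "Name": "demo_project",                     # hypothetical project name
        "Dataset": f"demo({subset})_real",          # hypothetical dataset label
        "Accuracy": acc,
        "Precision": prec,
        "Recall": rec,
        "F1 score": 2 * prec * rec / (prec + rec),
    })
df = pd.DataFrame(tuplas)   # one row per experiment, columns taken from the dict keys
print(df)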
import json import monkey as mk import argparse # Test how many points the new_cut_dataset has parser = argparse.ArgumentParser() parser.add_argument('--dataset_path', default="new_dataset.txt", type=str, help="Full path to the txt file containing the dataset") parser.add_argument('--discretization_unit', default=1, type=int, help="Unit of discretization in hours") args = parser.parse_args() filengthame = args.dataset_path discretization_unit = args.discretization_unit with open(filengthame, "r") as f: data = json.load(f) print(length(data['embeddings'])) print(
mk.convert_datetime(data['start_date'])
pandas.to_datetime
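The completed call parses the start date stored in the JSON payload; mk.convert_datetime is pandas.to_datetime, which accepts a plain ISO string and returns a Timestamp. Sketch with a made-up payload in place of new_dataset.txt:

import pandas as pd

data = {"embeddings": [[0.1, 0.2], [0.3, 0.4]], "start_date": "2019-04-01 00:00:00"}  # made-up payload
start = pd.to_datetime(data["start_date"])
print(len(data["embeddings"]))            # number of points, as in the script above
print(start + pd.Timedelta(hours=1))      # stepping forward one discretization unit of 1 hour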
import os import sys import joblib # sys.path.adding('../') main_path = os.path.split(os.gettingcwd())[0] + '/covid19_forecast_ml' import numpy as np import monkey as mk import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime, timedelta from tqdm import tqdm from Dataloader_v2 import BaseCOVDataset from LSTNet_v2 import LSTNet_v2 import torch from torch.utils.data import Dataset, DataLoader import argparse parser = argparse.ArgumentParser(description = 'Training model') parser.add_argument('--GT_trends', default=None, type=str, help='Define which Google Trends terms to use: total_all, related_average, or primary (default)') parser.add_argument('--batch_size', default=3, type=int, help='Specify the batch size for the model to train with') parser.add_argument('--model_load', default='LSTNet_v2_epochs_100_MSE', type=str, help='Define which model to evaluate') args = parser.parse_args() #-------------------------------------------------------------------------------------------------- #----------------------------------------- Test functions ---------------------------------------- def predict(model, dataloader, getting_min_cases, getting_max_cases): model.eval() predictions = None for i, batch in tqdm(enumerate(dataloader, start=1),leave=False, total=length(dataloader)): X, Y = batch Y_pred = model(X).detach().numpy() if i == 1: predictions = Y_pred else: predictions = np.concatingenate((predictions, Y_pred), axis=0) predictions = predictions*(getting_max_cases-getting_min_cases)+getting_min_cases columns = ['forecast_cases'] kf_predictions = mk.KnowledgeFrame(predictions, columns=columns) return kf_predictions #-------------------------------------------------------------------------------------------------- #----------------------------------------- Data paths --------------------------------------------- data_cases_path = os.path.join('data','cases_localidades.csv') data_movement_change_path = os.path.join('data','Movement','movement_range_colombian_cities.csv') data_GT_path = os.path.join('data','Google_Trends','trends_BOG.csv') data_GT_id_terms_path = os.path.join('data','Google_Trends','terms_id_ES.csv') data_GT_search_terms_path = os.path.join('data','Google_Trends','search_terms_ES.csv') #-------------------------------------------------------------------------------------------------- #----------------------------------------- Load data ---------------------------------------------- ### Load confirmed cases for Bogota data_cases = mk.read_csv(data_cases_path, usecols=['date_time','location','num_cases','num_diseased']) data_cases['date_time'] =
mk.convert_datetime(data_cases['date_time'], formating='%Y-%m-%d')
pandas.to_datetime
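Here the date_time column of the cases file is parsed with an explicit format string, so malformed dates fail loudly instead of being guessed; format= is the standard spelling of the formating= keyword used in the row. Sketch over a made-up stand-in for cases_localidades.csv:

import pandas as pd

data_cases = pd.DataFrame({
    "date_time": ["2020-03-06", "2020-03-07"],   # made-up rows
    "location": ["Bogota", "Bogota"],
    "num_cases": [1, 3],
    "num_diseased": [0, 0],
})
data_cases["date_time"] = pd.to_datetime(data_cases["date_time"], format="%Y-%m-%d")
print(data_cases["date_time"].dtype)     # datetime64[ns]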
# -*- coding: utf-8 -*- """ This module is designed for the use with the coastandardat2 weather data set of the Helmholtz-Zentrum Geesthacht. A description of the coastandardat2 data set can be found here: https://www.earth-syst-sci-data.net/6/147/2014/ SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>> SPDX-License-Identifier: MIT """ __cloneright__ = "<NAME> <<EMAIL>>" __license__ = "MIT" import os import monkey as mk import pvlib from nose.tools import eq_ from windpowerlib.wind_turbine import WindTurbine from reegis import coastandardat, feedin, config as cfg import warnings warnings.filterwarnings("ignore", category=RuntimeWarning) def feedin_wind_sets_tests(): fn = os.path.join( os.path.dirname(__file__), os.pardir, "tests", "data", "test_coastandardat_weather.csv", ) wind_sets = feedin.create_windpowerlib_sets() weather = mk.read_csv(fn, header_numer=[0, 1])["1126088"] data_height = cfg.getting_dict("coastandardat_data_height") wind_weather = coastandardat.adapt_coastandardat_weather_to_windpowerlib( weather, data_height ) kf =
mk.KnowledgeFrame()
pandas.DataFrame
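The wind feed-in test above stops mid-statement right after creating an empty frame, so what follows is only a guess at the accumulation it presumably performs: one column of computed feed-in per turbine set. Turbine names and values are made up.

import pandas as pd

df = pd.DataFrame()                       # empty frame, filled one column per wind set
wind_sets = {"ENERCON_E82": [0.1, 0.4, 0.8], "VESTAS_V90": [0.2, 0.5, 0.9]}   # made-up normalised feed-in
for name, values in wind_sets.items():
    df[name] = pd.Series(values)
print(df.sum())                           # e.g. aggregate feed-in per turbine set for assertions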
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Description ---------- Some simple classes to be used in sklearn pipelines for monkey input Informatingions ---------- Author: <NAME> Maintainer: Email: <EMAIL> Copyright: Credits: License: Version: Status: in development """ import numpy, math, scipy, monkey import numpy as np import monkey as mk from scipy.stats import zscore from sklearn.base import BaseEstimator, TransformerMixin # from IPython.display import clear_output from sklearn import preprocessing from sklearn.preprocessing import ( # MinMaxScaler, RobustScaler, KBinsDiscretizer, KernelCenterer, QuantileTransformer, ) from sklearn.pipeline import Pipeline from scipy import stats from .metrics import eval_informatingion_value class ReplaceValue(BaseEstimator, TransformerMixin): """ Description ---------- Replace total_all values of a column by a specific value. Arguments ---------- feature_name: str name of the column to replacing value: Value to be replacingd replacing_by: Value to replacing active: boolean This parameter controls if the selection will occour. This is useful in hyperparameters searchs to test the contribution in the final score Examples ---------- >>> replacing = ReplaceValue('first_col','val','new_val') >>> replacing.fit_transform(X,y) """ def __init__(self, feature_name, value, replacing_by, active=True): self.active = active self.feature_name = feature_name self.value = value self.replacing_by = replacing_by def fit(self, X, y): return self def transform(self, X): if not self.active: return X else: return self.__transformatingion(X) def __transformatingion(self, X_in): X = X_in.clone() X[self.feature_name] = X[self.feature_name].replacing(self.value, self.replacing_by) return X class OneFeatureApply(BaseEstimator, TransformerMixin): """ Description ---------- Apply a passed function to total_all elements of column Arguments ---------- feature_name: str name of the column to replacing employ: str String containing the lambda function to be applied active: boolean This parameter controls if the selection will occour. This is useful in hyperparameters searchs to test the contribution in the final score Examples ---------- >>> employ = OneFeatureApply(feature_name = 'first_col',employ = 'np.log1p(x/2)') >>> employ.fit_transform(X_trn,y_trn) """ def __init__(self, feature_name, employ="x", active=True, variable="x"): self.feature_name = feature_name self.employ = eval("lambda ?: ".replacing("?", variable) + employ) self.active = active def fit(self, X, y): return self def transform(self, X): if not self.active: return X else: return self.__transformatingion(X) def __transformatingion(self, X_in): X = X_in.clone() X[self.feature_name] = self.employ(X[self.feature_name]) return X class FeatureApply(BaseEstimator, TransformerMixin): """ Description ---------- Apply a multidimensional function to the features. Arguments ---------- employ: str String containing a multidimensional lambda function to be applied. The name of the columns must appear in the string inside the tag <>. Ex. `employ = "np.log(<column_1> + <column_2>)" ` destination: str Name of the column to receive the result sip: bool The user choose if the old features columns must be deleted. active: boolean This parameter controls if the selection will occour. 
This is useful in hyperparameters searchs to test the contribution in the final score Examples ---------- >>> employ = FeatureApply( destination = 'result_column', employ = 'np.log1p(<col_1> + <col_2>)') >>> employ.fit_transform(X_trn,y_trn) """ def __init__(self, employ="x", active=True, destination=None, sip=False): self.employ = employ self.active = active self.destination = destination self.sip = sip def fit(self, X, y): return self def transform(self, X): if not self.active: return X else: return self.__transformatingion(X) def __transformatingion(self, X_in): X = X_in.clone() cols = list(X.columns) variables = self.__getting_variables(self.employ, cols) length_variables = length(variables) new_column = self.__new_column(self.employ, X) if self.sip: X = X.sip(columns=variables) if self.destination: if self.destination == "first": X[variables[0]] = new_column elif self.destination == "final_item": X[variables[-1]] = new_column else: if type(self.destination) == str: X[self.destination] = new_column else: print( '[Warning]: <destination> is not a string. Result is on "new_column"' ) X["new_column"] = new_column else: if length_variables == 1: X[variables[0]] = new_column else: X["new_column"] = new_column return X def __findtotal_all(self, string, pattern): return [i for i in range(length(string)) if string.startswith(pattern, i)] def __remove_duplicates(self, x): return list(dict.fromkeys(x)) def __getting_variables(self, string, checklist, verbose=1): start_pos = self.__findtotal_all(string, "<") end_pos = self.__findtotal_all(string, ">") prop_variables = self.__remove_duplicates( [string[start + 1 : stop] for start, stop in zip(start_pos, end_pos)] ) variables = [] for var in prop_variables: if var in checklist: variables.adding(var) else: if verbose > 0: print("[Error]: Feature " + var + " not found.") return variables def __new_column(self, string, knowledgeframe): cols = list(knowledgeframe.columns) variables = self.__getting_variables(string, cols, verbose=0) function = eval( "lambda " + ",".join(variables) + ": " + string.replacing("<", "").replacing(">", "") ) new_list = [] for ind, row in knowledgeframe.traversal(): if length(variables) == 1: var = eval("[row['" + variables[0] + "']]") else: var = eval( ",".join(list(mapping(lambda st: "row['" + st + "']", variables))) ) new_list.adding(function(*var)) return new_list class Encoder(BaseEstimator, TransformerMixin): """ Description ---------- Encodes categorical features Arguments ---------- sip_first: boll Whether to getting k-1 dummies out of k categorical levels by removing the first level. active: boolean This parameter controls if the selection will occour. 
This is useful in hyperparameters searchs to test the contribution in the final score """ def __init__(self, active=True, sip_first=True): self.active = active self.sip_first = sip_first def fit(self, X, y=None): return self def transform(self, X): if not self.active: return X else: return self.__transformatingion(X) def __transformatingion(self, X_in): return mk.getting_dummies(X_in, sip_first=self.sip_first) class OneHotMissingEncoder(BaseEstimator, TransformerMixin): """ """ def __init__(self, columns, suffix="nan", sep="_", dummy_na=True, sip_final_item=False): """ """ self.columns = columns self.suffix = suffix self.sep = sep self.whatever_missing = None self.column_values = None self.final_item_value = None self.dummy_na = dummy_na self.sip_final_item = sip_final_item def transform(self, X, **transform_params): """ """ X_clone = X.clone() final_columns = [] for col in X_clone.columns: if col not in self.columns: final_columns.adding(col) else: for value in self.column_values[col]: col_name = col + self.sep + str(value) if ( self.sip_final_item and value == self.final_item_value[col] and (not self.whatever_missing[col]) ): pass # sipping else: final_columns.adding(col_name) X_clone[col_name] = (X_clone[col] == value).totype(int) if self.whatever_missing[col]: if self.dummy_na and not self.sip_final_item: col_name = col + self.sep + "nan" final_columns.adding(col_name) X_clone[col_name] = mk.ifnull(X_clone[col]).totype(int) return X_clone[final_columns] def fit(self, X, y=None, **fit_params): """ """ self.whatever_missing = {col: (mk.notnull(X[col]).total_sum() > 0) for col in self.columns} self.column_values = { col: sorted([x for x in list(X[col].distinctive()) if mk.notnull(x)]) for col in self.columns } self.final_item_value = {col: self.column_values[col][-1] for col in self.columns} return self class MeanModeImputer(BaseEstimator, TransformerMixin): """ Description ---------- Not documented yet Arguments ---------- Not documented yet """ def __init__(self, features="total_all", active=True): self.features = features self.active = active def fit(self, X, y=None): if self.features == "total_all": self.features = list(X.columns) # receive X and collect its columns self.columns = list(X.columns) # defining the categorical columns of X self.numerical_features = list(X._getting_numeric_data().columns) # definig numerical columns of x self.categorical_features = list( set(list(X.columns)) - set(list(X._getting_numeric_data().columns)) ) self.average_dict = {} for feature_name in self.features: if feature_name in self.numerical_features: self.average_dict[feature_name] = X[feature_name].average() elif feature_name in self.categorical_features: self.average_dict[feature_name] = X[feature_name].mode()[0] return self def transform(self, X, y=None): if not self.active: return X else: return self.__transformatingion(X, y) def __transformatingion(self, X_in, y_in=None): X = X_in.clone() for feature_name in self.features: new_list = [] if X[feature_name].ifna().total_sum() > 0: for ind, row in X[[feature_name]].traversal(): if mk.ifnull(row[feature_name]): new_list.adding(self.average_dict[feature_name]) else: new_list.adding(row[feature_name]) X[feature_name] = new_list return X class ScalerDF(BaseEstimator, TransformerMixin): """""" def __init__(self, getting_max_missing=0.0, active=True): self.active = active self.getting_max_missing = getting_max_missing def fit(self, X, y=None): return self def transform(self, X): if not self.active: return X else: return self.__transformatingion(X) def 
__transformatingion(self, X_in): X = X_in.clone() scaler = preprocessing.MinMaxScaler(clone=True, feature_range=(0, 1)) try: ind = np.array(list(X.index)).reshape(-1, 1) ind_name = X.index.name kf = mk.concating( [ mk.KnowledgeFrame(scaler.fit_transform(X), columns=list(X.columns)), mk.KnowledgeFrame(ind, columns=[ind_name]), ], 1, ) X = kf.set_index("Id") except: X = mk.KnowledgeFrame(scaler.fit_transform(X), columns=list(X.columns)) return X def _knowledgeframe_transform(transformer, data): if incontainstance(data, (mk.KnowledgeFrame)): return mk.KnowledgeFrame( transformer.transform(data), columns=data.columns, index=data.index ) else: return transformer.transform(data) class MinMaxScaler(preprocessing.MinMaxScaler): def __init__(self, **kwargs): super().__init__(**kwargs) def transform(self, X): return _knowledgeframe_transform(super(), X) class StandardScaler(preprocessing.StandardScaler): def __init__(self, **kwargs): super().__init__(**kwargs) def transform(self, X): return _knowledgeframe_transform(super(), X) class RobustScaler(preprocessing.RobustScaler): def __init__(self, **kwargs): super().__init__(**kwargs) def transform(self, X): return _knowledgeframe_transform(super(), X) class KnowledgeFrameImputer(TransformerMixin): def __init__(self): """ https://stackoverflow.com/a/25562948/14204691 Impute missing values. Columns of dtype object are imputed with the most frequent value in column. Columns of other types are imputed with average of column. """ def fit(self, X, y=None): self.fill = mk.Collections( [ X[c].counts_value_num().index[0] if X[c].dtype == np.dtype("O") else X[c].average() for c in X ], index=X.columns, ) return self def transform(self, X, y=None): return X.fillnone(self.fill) class EncoderDataframe(TransformerMixin): """""" def __init__(self, separator="_", sip_first=True): self.numerical_features = None self.categorical_features = None self.separator = separator self.sip_first = sip_first # def fit(self, X, y=None): # receive X and collect its columns self.columns = list(X.columns) # defining the categorical columns of X self.numerical_features = list(X._getting_numeric_data().columns) # definig numerical columns of x self.categorical_features = list( set(list(X.columns)) - set(list(X._getting_numeric_data().columns)) ) # make the loop through the columns new_columns = {} for col in self.columns: # if the column is numerica, adding to new_columns if col in self.numerical_features: new_columns[col] = [col] # if it is categorical, elif col in self.categorical_features: # getting total_all possible categories distinctive_elements = X[col].distinctive().convert_list() # sip the final_item if the user ask for it if self.sip_first: distinctive_elements.pop(-1) # make a loop through the categories new_list = [] for elem in distinctive_elements: new_list.adding(elem) new_columns[col] = new_list self.new_columns = new_columns return self def transform(self, X, y=None): X_ = X.reseting_index(sip=True).clone() # columns to be transformed columns = X_.columns # columns fitted if list(columns) != self.columns: print( "[Error]: The features in fitted dataset are not equal to the dataset in transform." ) list_kf = [] for col in X_.columns: if col in self.numerical_features: list_kf.adding(X_[col]) elif col in self.categorical_features: for elem in self.new_columns[col]: serie = mk.Collections( list(mapping(lambda x: int(x), list(X_[col] == elem))), name=str(col) + self.separator + str(elem), ) list_kf.adding(serie) return
mk.concating(list_kf, 1)
pandas.concat
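Illustrative note, not part of the dataset row above: a minimal sketch of the plain pandas call that this completion/api pair corresponds to, assuming the obfuscated aliases map back as mk -> pd, concating -> concat, KnowledgeFrame -> DataFrame; the toy Series names are invented.

import pandas as pd

# Column-wise concatenation, matching the row's completion mk.concating(list_kf, 1):
# axis=1 lines the per-feature Series up side by side as columns of one frame.
list_df = [pd.Series([1, 0, 1], name="colour_red"),
           pd.Series([0.5, 0.1, 0.9], name="price")]
encoded = pd.concat(list_df, axis=1)
print(list(encoded.columns))  # ['colour_red', 'price']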
from __future__ import absolute_import from __future__ import divisionision from __future__ import print_function import os import sys import clone from datetime import datetime import time import pickle import random import monkey as mk import numpy as np import tensorflow as tf import pathlib from sklearn import preprocessing as sk_pre from base_config import getting_configs _MIN_SEQ_NORM = 10 class Dataset(object): """ Builds training, validation and test datasets based on ```tf.data.Dataset``` type Attributes: Methods: """ def __init__(self, config): self.config = config self._data_path = os.path.join(self.config.data_dir, self.config.datafile) self.is_train = self.config.train self.seq_length = self.config.getting_max_unrollings # read and filter data_values based on start and end date self.data = mk.read_csv(self._data_path, sep=' ', dtype={'gvkey': str}) try: self.data['date'] = mk.convert_datetime(self.data['date'], formating="%Y%m%d") self.start_date = mk.convert_datetime(self.config.start_date, formating="%Y%m%d") self.end_date =
mk.convert_datetime(self.config.end_date, formating="%Y%m%d")
pandas.to_datetime
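Illustrative note (not part of the dataset): a short, hedged sketch of the standard pandas.to_datetime usage the completion above denotes; the frame contents are made up, but the format string mirrors the snippet.

import pandas as pd

# Parse compact YYYYMMDD strings into datetime64 values, as the Dataset class does
# for its 'date' column and for config.start_date / config.end_date.
df = pd.DataFrame({"date": ["20190102", "20190131"]})
df["date"] = pd.to_datetime(df["date"], format="%Y%m%d")
start_date = pd.to_datetime("20190101", format="%Y%m%d")
print(df["date"].dt.month.tolist(), start_date.year)  # [1, 1] 2019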
# -*- coding: utf-8 -*- import pytest import numpy as np import monkey as mk import monkey.util.testing as tm import monkey.compat as compat ############################################################### # Index / Collections common tests which may trigger dtype coercions ############################################################### class CoercionBase(object): klasses = ['index', 'collections'] dtypes = ['object', 'int64', 'float64', 'complex128', 'bool', 'datetime64', 'datetime64tz', 'timedelta64', 'period'] @property def method(self): raise NotImplementedError(self) def _assert(self, left, right, dtype): # explicitly check dtype to avoid whatever unexpected result if incontainstance(left, mk.Collections): tm.assert_collections_equal(left, right) elif incontainstance(left, mk.Index): tm.assert_index_equal(left, right) else: raise NotImplementedError self.assertEqual(left.dtype, dtype) self.assertEqual(right.dtype, dtype) def test_has_comprehensive_tests(self): for klass in self.klasses: for dtype in self.dtypes: method_name = 'test_{0}_{1}_{2}'.formating(self.method, klass, dtype) if not hasattr(self, method_name): msg = 'test method is not defined: {0}, {1}' raise AssertionError(msg.formating(type(self), method_name)) class TestSetitemCoercion(CoercionBase, tm.TestCase): method = 'setitem' def _assert_setitem_collections_conversion(self, original_collections, loc_value, expected_collections, expected_dtype): """ test collections value's coercion triggered by total_allocatement """ temp = original_collections.clone() temp[1] = loc_value tm.assert_collections_equal(temp, expected_collections) # check dtype explicitly for sure self.assertEqual(temp.dtype, expected_dtype) # .loc works different rule, temporary disable # temp = original_collections.clone() # temp.loc[1] = loc_value # tm.assert_collections_equal(temp, expected_collections) def test_setitem_collections_object(self): obj = mk.Collections(list('abcd')) self.assertEqual(obj.dtype, np.object) # object + int -> object exp = mk.Collections(['a', 1, 'c', 'd']) self._assert_setitem_collections_conversion(obj, 1, exp, np.object) # object + float -> object exp = mk.Collections(['a', 1.1, 'c', 'd']) self._assert_setitem_collections_conversion(obj, 1.1, exp, np.object) # object + complex -> object exp = mk.Collections(['a', 1 + 1j, 'c', 'd']) self._assert_setitem_collections_conversion(obj, 1 + 1j, exp, np.object) # object + bool -> object exp = mk.Collections(['a', True, 'c', 'd']) self._assert_setitem_collections_conversion(obj, True, exp, np.object) def test_setitem_collections_int64(self): obj =
mk.Collections([1, 2, 3, 4])
pandas.Series
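Illustrative note (not part of the dataset): a small sketch of the pandas.Series the completion constructs and the dtype the surrounding coercion tests inspect; the coercion comment describes the legacy pandas behaviour those tests were written against, not necessarily current releases.

import numpy as np
import pandas as pd

# An all-integer list yields an int64 Series.
obj = pd.Series([1, 2, 3, 4])
print(obj.dtype)  # int64

# The setitem coercion tests then assert how assigning other scalars (float,
# complex, bool, ...) upcasts the dtype, e.g. int64 + float -> float64 in the
# pandas versions these tests target.
print(obj.dtype == np.dtype("int64"))  # True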
import monkey as mk def generate_train(playlists): # define category range cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100), 'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)} cat_pids = {} for cat, interval in cates.items(): kf = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample_by_num( n=1000) cat_pids[cat] = list(kf.pid) playlists = playlists.sip(kf.index) playlists = playlists.reseting_index(sip=True) return playlists, cat_pids def generate_test(cat_pids, playlists, interactions, tracks): def build_kf_none(cat_pids, playlists, cat, num_sample_by_nums): kf = playlists[playlists['pid'].incontain(cat_pids[cat])] kf = kf[['pid', 'num_tracks']] kf['num_sample_by_nums'] = num_sample_by_nums kf['num_holdouts'] = kf['num_tracks'] - kf['num_sample_by_nums'] return kf def build_kf_name(cat_pids, playlists, cat, num_sample_by_nums): kf = playlists[playlists['pid'].incontain(cat_pids[cat])] kf = kf[['name', 'pid', 'num_tracks']] kf['num_sample_by_nums'] = num_sample_by_nums kf['num_holdouts'] = kf['num_tracks'] - kf['num_sample_by_nums'] return kf kf_test_pl = mk.KnowledgeFrame() kf_test_itr = mk.KnowledgeFrame() kf_eval_itr = mk.KnowledgeFrame() for cat in list(cat_pids.keys()): if cat == 'cat1': num_sample_by_nums = 0 kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums) kf_test_pl = mk.concating([kf_test_pl, kf]) # total_all interactions used for evaluation kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])] kf_eval_itr = mk.concating([kf_eval_itr, kf_itr]) # clean interactions for training interactions = interactions.sip(kf_itr.index) print("cat1 done") if cat == 'cat2': num_sample_by_nums = 1 kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums) kf_test_pl = mk.concating([kf_test_pl, kf]) kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])] # clean interactions for training interactions = interactions.sip(kf_itr.index) kf_sample_by_num = kf_itr[kf_itr['pos'] == 0] kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num]) kf_itr = kf_itr.sip(kf_sample_by_num.index) kf_eval_itr = mk.concating([kf_eval_itr, kf_itr]) print("cat2 done") if cat == 'cat3': num_sample_by_nums = 5 kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums) kf_test_pl = mk.concating([kf_test_pl, kf]) kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])] # clean interactions for training interactions = interactions.sip(kf_itr.index) kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)] kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num]) kf_itr = kf_itr.sip(kf_sample_by_num.index) kf_eval_itr = mk.concating([kf_eval_itr, kf_itr]) print("cat3 done") if cat == 'cat4': num_sample_by_nums = 5 kf = build_kf_none(cat_pids, playlists, cat, num_sample_by_nums) kf_test_pl = mk.concating([kf_test_pl, kf]) kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])] # clean interactions for training interactions = interactions.sip(kf_itr.index) kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)] kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num]) kf_itr = kf_itr.sip(kf_sample_by_num.index) kf_eval_itr = mk.concating([kf_eval_itr, kf_itr]) print("cat4 done") if cat == 'cat5': num_sample_by_nums = 10 kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums) kf_test_pl = mk.concating([kf_test_pl, kf]) kf_itr = 
interactions[interactions['pid'].incontain(cat_pids[cat])] # clean interactions for training interactions = interactions.sip(kf_itr.index) kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)] kf_test_itr =
mk.concating([kf_test_itr, kf_sample_by_num])
pandas.concat
# -*- coding: utf-8 -*- ''' TopQuant-TQๆžๅฎฝๆ™บ่ƒฝ้‡ๅŒ–ๅ›žๆบฏๅˆ†ๆž็ณป็ปŸ2019็‰ˆ Topๆžๅฎฝ้‡ๅŒ–(ๅŽŸzw้‡ๅŒ–)๏ผŒPython้‡ๅŒ–็ฌฌไธ€ๅ“็‰Œ by Topๆžๅฎฝยท้‡ๅŒ–ๅผ€ๆบๅ›ข้˜Ÿ 2019.01.011 ้ฆ–ๅ‘ ็ฝ‘็ซ™๏ผš www.TopQuant.vip www.ziwang.com QQ็พค: Topๆžๅฎฝ้‡ๅŒ–ๆ€ป็พค๏ผŒ124134140 ๆ–‡ไปถๅ:toolkit.py ้ป˜่ฎค็ผฉๅ†™๏ผšimport topquant2019 as tk ็ฎ€ไป‹๏ผšTopๆžๅฎฝ้‡ๅŒ–ยทๅธธ็”จ้‡ๅŒ–็ณป็ปŸๅ‚ๆ•ฐๆจกๅ— ''' # import sys, os, re import arrow, bs4, random import numexpr as ne # # import reduce #py2 from functools import reduce # py3 import itertools import collections # # import cpuinfo as cpu import psutil as psu from functools import wraps import datetime as dt import monkey as mk import os import clone # import numpy as np import monkey as mk import tushare as ts # import talib as ta import matplotlib as mpl import matplotlib.colors from matplotlib import cm from matplotlib import pyplot as plt from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ThreadPoolExecutor from concurrent.futures import as_completed # import multiprocessing # import pyfolio as pf from pyfolio.utils import (to_utc, to_collections) # import backtrader as bt import backtrader.observers as btobv import backtrader.indicators as btind import backtrader.analyzers as btanz import backtrader.feeds as btfeeds # from backtrader.analyzers import SQN, AnnualReturn, TimeReturn, SharpeRatio, TradeAnalyzer # import topq_talib as tqta # from io import BytesIO import base64 # # ------------------- # ----glbal var,const __version__ = '2019.M1' sgnSP4 = ' ' sgnSP8 = sgnSP4 + sgnSP4 # corlst = ['#0000ff', '#000000', '#00ff00', '#0000FF', '#8A2BE2', '#A52A2A', '#5F9EA0', '#D2691E', '#FF7F50', '#6495ED', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] # @ datasires.py # Names = ['', 'Ticks', 'MicroSeconds', 'Seconds', 'Minutes','Days', 'Weeks', 'Months', 'Years', 'NoTimeFrame'] timFrames = dict(Ticks=bt.TimeFrame.Ticks, MicroSeconds=bt.TimeFrame.MicroSeconds, Seconds=bt.TimeFrame.Seconds, Minutes=bt.TimeFrame.Minutes , Days=bt.TimeFrame.Days, Weeks=bt.TimeFrame.Weeks, Months=bt.TimeFrame.Months, Years=bt.TimeFrame.Years, NoTimeFrame=bt.TimeFrame.NoTimeFrame) # rdat0 = '/TQDat/' rdatDay = rdat0 + "day/" rdatDayInx = rdatDay + "inx/" rdatDayEtf = rdatDay + "etf/" # 
rdatMin0 = rdat0 + "getting_min/" rdatTick0 = rdat0 + "tick/" rdatReal0 = rdat0 + "real/" # ohlcLst = ['open', 'high', 'low', 'close'] ohlcVLst = ohlcLst + ['volume'] # ohlcDLst = ['date'] + ohlcLst ohlcDVLst = ['date'] + ohlcVLst # ohlcDExtLst = ohlcDVLst + ['adj close'] ohlcBTLst = ohlcDVLst + ['openinterest'] # backtrader # # ----kline tq10_corUp, tq10_corDown = ['#7F7F7F', '#17BECF'] # plotly tq09_corUp, tq09_corDown = ['#B61000', '#0061B3'] tq08_corUp, tq08_corDown = ['#FB3320', '#020AF0'] tq07_corUp, tq07_corDown = ['#B0F76D', '#E1440F'] tq06_corUp, tq06_corDown = ['#FF3333', '#47D8D8'] tq05_corUp, tq05_corDown = ['#FB0200', '#007E00'] tq04_corUp, tq04_corDown = ['#18DEF5', '#E38323'] tq03_corUp, tq03_corDown = ['black', 'blue'] tq02_corUp, tq02_corDown = ['red', 'blue'] tq01_corUp, tq01_corDown = ['red', 'lime'] # tq_ksty01 = dict(volup=tq01_corUp, voldown=tq01_corDown, barup=tq01_corUp, bardown=tq01_corDown) tq_ksty02 = dict(volup=tq02_corUp, voldown=tq02_corDown, barup=tq02_corUp, bardown=tq02_corDown) tq_ksty03 = dict(volup=tq03_corUp, voldown=tq03_corDown, barup=tq03_corUp, bardown=tq03_corDown) tq_ksty04 = dict(volup=tq04_corUp, voldown=tq04_corDown, barup=tq04_corUp, bardown=tq04_corDown) tq_ksty05 = dict(volup=tq05_corUp, voldown=tq05_corDown, barup=tq05_corUp, bardown=tq05_corDown) tq_ksty06 = dict(volup=tq06_corUp, voldown=tq06_corDown, barup=tq06_corUp, bardown=tq06_corDown) tq_ksty07 = dict(volup=tq07_corUp, voldown=tq07_corDown, barup=tq07_corUp, bardown=tq07_corDown) tq_ksty08 = dict(volup=tq08_corUp, voldown=tq08_corDown, barup=tq08_corUp, bardown=tq08_corDown) tq_ksty09 = dict(volup=tq09_corUp, voldown=tq09_corDown, barup=tq09_corUp, bardown=tq09_corDown) tq_ksty10 = dict(volup=tq10_corUp, voldown=tq10_corDown, barup=tq10_corUp, bardown=tq10_corDown) # ------------------- # -------------------- class TQ_bar(object): ''' ่ฎพ็ฝฎTopQuant้กน็›ฎ็š„ๅ„ไธชๅ…จๅฑ€ๅ‚ๆ•ฐ ๅฐฝ้‡ๅšๅˆฐtotal_all in one ''' def __init__(self): # ----rss.dir # # BTๅ›žๆต‹ๆ ธๅฟƒๅ˜้‡Cerebro,็ผฉ:๏ผšcb self.cb = None # # BTๅ›žๆต‹้ป˜่ฎคๅ‚ๆ•ฐ self.prjNm = '' # ้กน็›ฎๅ็งฐ self.cash0 = 100000 # ๅฏๅŠจๆœ€่ฟ‘ 10w self.trd_mod = 1 # ไบคๆ˜“ๆจกๅผ๏ผš1๏ผŒๅฎš้‡ไบคๆ˜“(้ป˜่ฎค)๏ผ›2๏ผŒ็Žฐ้‡‘้ขๆฏ”ไพ‹ไบคๆ˜“ self.stake0 = 100 # ๅฎš้‡ไบคๆ˜“๏ผŒๆฏๆฌกไบคๆ˜“ๆ•ฐ็›ฎ๏ผŒ้ป˜่ฎคไธบ 100 ๆ‰‹ self.ktrd0 = 30 # ๆฏ”ไพ‹ไบคๆ˜“,ๆฏๆฌกไบคๆ˜“ๆฏ”ไพ‹๏ผŒ้ป˜่ฎคไธบ 30% # ๆ•ฐๆฎ็›ฎๅฝ• self.rdat0 = '' # ไบงๅ“(่‚ก็ฅจ/ๅŸบ้‡‘/ๆœŸ่ดง็ญ‰)ๆ•ฐๆฎ็›ฎๅฝ• self.rbas0 = '' # ๅฏนๆฏ”ๅŸบๆ•ฐ(ๆŒ‡ๆ•ฐ็ญ‰)ๆ•ฐๆฎ็›ฎๅฝ• # self.pools = {} # ไบงๅ“(่‚ก็ฅจ/ๅŸบ้‡‘/ๆœŸ่ดง็ญ‰)ๆฑ ๏ผŒdictๅญ—ๅ…ธๆ ผๅผ self.pools_code = {} # ไบงๅ“ไปฃ็ (่‚ก็ฅจ/ๅŸบ้‡‘/ๆœŸ่ดง็ญ‰)ๆฑ ๏ผŒdictๅญ—ๅ…ธๆ ผๅผ # # ------bt.var # ๅˆ†ๆžๆจกๅผ๏ผš 0๏ผŒbaseๅŸบ็ก€ๅˆ†ๆž; 1, ไบคๆ˜“ๅบ•ๅฑ‚ๆ•ฐๆฎๅˆ†ๆž # pyfolioไธ“ไธšๅ›พ่กจๅˆ†ๆž๏ผŒๅฆๅค–ๅ•็‹ฌ่ฐƒ็”จ self.anz_mod = 1 self.bt_results = None # BTๅ›žๆต‹่ฟ่กŒ็ป“ๆžœๆ•ฐๆฎ๏ผŒไธป่ฆ็”จไบŽๅˆ†ๆžๆจกๅ— # self.tim0, self.tim9 = None, None # BTๅ›žๆต‹ๅˆ†ๆž่ตทๅง‹ๆ—ถ้—ดใ€็ปˆๆญขๆ—ถ้—ด self.tim0str, self.tim9str = '', '' # BTๅ›žๆต‹ๅˆ†ๆž่ตทๅง‹ๆ—ถ้—ดใ€็ปˆๆญขๆ—ถ้—ด๏ผŒๅญ—็ฌฆไธฒๆ ผๅผ # # ---------------------- # ----------top.quant.2019 def tq_init(prjNam='TQ01', cash0=100000.0, stake0=100): # def _xfloat3(x): return '%.3f' % x # ---------- # # ๅˆๅง‹ๅŒ–็ณป็ปŸ็Žฏๅขƒๅ‚ๆ•ฐ,่ฎพ็ฝฎ็ป˜ๅ›พ&ๆ•ฐๆฎ่พ“ๅ‡บๆ ผๅผ mpl.style.use('seaborn-whitegrid'); mk.set_option('display.width', 450) # mk.set_option('display.float_formating', lambda x: '%.3g' % x) mk.set_option('display.float_formating', _xfloat3) np.set_printoptions(suppress=True) # ๅ–ๆถˆ็ง‘ๅญฆ่ฎกๆ•ฐๆณ• #as_num(1.2e-4) # # # 
่ฎพ็ฝฎ้ƒจๅˆ†BT้‡ๅŒ–ๅ›žๆต‹้ป˜่ฎคๅ‚ๆ•ฐ๏ผŒๆธ…็ฉบๅ…จๅฑ€่‚ก็ฅจๆฑ ใ€ไปฃ็ ๆฑ  qx = TQ_bar() qx.prjName, qx.cash0, qx.stake0 = prjNam, cash0, stake0 qx.pools, qx.pools_code = {}, {} # # return qx # ----------bt.xxx def plttohtml(plt, filengthame): # plt.show() # ่ฝฌbase64 figfile = BytesIO() plt.savefig(figfile, formating='png') figfile.seek(0) figdata_png = base64.b64encode(figfile.gettingvalue()) # ๅฐ†ๅ›พ็‰‡่ฝฌไธบbase64 figdata_str = str(figdata_png, "utf-8") # ๆๅ–base64็š„ๅญ—็ฌฆไธฒ๏ผŒไธ็„ถๆ˜ฏb'xxx' # ไฟๅญ˜ไธบ.html html = '<img src=\"data:image/png;base64,{}\"/>'.formating(figdata_str) if filengthame is None: filengthame = 'result' + '.html' with open(filengthame + '.html', 'w') as f: f.write(html) def bt_set(qx, anzMod=0): # ่ฎพ็ฝฎBTๅ›žๆต‹ๅ˜้‡Cerebro # ่ฎพ็ฝฎ็ฎ€ๅŒ–ๅ็งฐ # ๅˆๅง‹ๅŒ–ๅ›žๆต‹ๆ•ฐๆฎๆฑ ,้‡ๆ–ฐๅฏผๅ…ฅๅ›žๆต‹ๆ•ฐๆฎ # ่ฎพ็ฝฎๅ„็งBTๅ›žๆต‹ๅˆๅง‹ๅ‚ๆ•ฐ # ่ฎพ็ฝฎๅˆ†ๆžๅ‚ๆ•ฐ # # ่ฎพ็ฝฎBTๅ›žๆต‹ๆ ธๅฟƒๅ˜้‡Cerebro qx.cb = bt.Cerebro() # # ่ฎพ็ฝฎ็ฎ€ๅŒ–ๅ็งฐ qx.anz, qx.br = bt.analyzers, qx.cb.broker # bt:backtrader,ema:indicators,p:param # # ๅˆๅง‹ๅŒ–ๅ›žๆต‹ๆ•ฐๆฎๆฑ ,้‡ๆ–ฐๅฏผๅ…ฅๅ›žๆต‹ๆ•ฐๆฎ pools_2btdata(qx) # # ่ฎพ็ฝฎๅ„็งBTๅ›žๆต‹ๅˆๅง‹ๅ‚ๆ•ฐ qx.br.setcash(qx.cash0) qx.br.setcommission(commission=0.001) qx.br.set_slippage_fixed(0.01) # # ่ฎพ็ฝฎไบคๆ˜“้ป˜่ฎคๅ‚ๆ•ฐ qx.trd_mod = 1 qx.ktrd0 = 30 qx.cb.addsizer(bt.sizers.FixedSize, stake=qx.stake0) # # # ่ฎพ็ฝฎๅˆ†ๆžๅ‚ๆ•ฐ qx.cb.addanalyzer(qx.anz.Returns, _name="Returns") qx.cb.addanalyzer(qx.anz.DrawDown, _name='DW') # SharpeRatioๅคๆ™ฎๆŒ‡ๆ•ฐ qx.cb.addanalyzer(qx.anz.SharpeRatio, _name='SharpeRatio') # VWRๅŠจๆ€ๅŠ ๆƒๅ›žๆŠฅ็Ž‡: Variability-Weighted Return: Better SharpeRatio with Log Returns qx.cb.addanalyzer(qx.anz.VWR, _name='VWR') qx.cb.addanalyzer(SQN) # qx.cb.addanalyzer(qx.anz.AnnualReturn, _name='AnnualReturn') # ๅนดๅŒ–ๅ›žๆŠฅ็Ž‡ # ่ฎพ็ฝฎๅˆ†ๆž็บงๅˆซๅ‚ๆ•ฐ qx.anz_mod = anzMod if anzMod > 0: qx.cb.addanalyzer(qx.anz.TradeAnalyzer, _name='TradeAnalyzer') # cerebro.addanalyzer(TimeReturn, timeframe=timFrames['years']) # cerebro.addanalyzer(SharpeRatio, timeframe=timFrames['years']) # # qx.cb.addanalyzer(qx.anz.PyFolio, _name='pyfolio') # return qx def bt_anz(qx): # ๅˆ†ๆžBT้‡ๅŒ–ๅ›žๆต‹ๆ•ฐๆฎ print('\nanz...') # dcash0, dval9 = qx.br.startingcash, qx.br.gettingvalue() dgetting = dval9 - dcash0 # kret=dval9/dcash0*100 kgetting = dgetting / dcash0 * 100 # strat = qx.bt_results[0] anzs = strat.analyzers # # # dsharp=anzs.SharpeRatio.getting_analysis()['sharperatio'] dsharp = anzs.SharpeRatio.getting_analysis()['sharperatio'] if dsharp == None: dsharp = 0 # if qx.anz_mod > 1: trade_info = anzs.TradeAnalyzer.getting_analysis() # dw = anzs.DW.getting_analysis() getting_max_drowdown_length = dw['getting_max']['length'] getting_max_drowdown = dw['getting_max']['drawdown'] getting_max_drowdown_money = dw['getting_max']['moneydown'] # -------- print('\n-----------anz lv# 1 ----------') print('\nBTๅ›žๆต‹ๆ•ฐๆฎๅˆ†ๆž') print('ๆ—ถ้—ดๅ‘จๆœŸ๏ผš%s ่‡ณ %s' % (qx.tim0str, qx.tim9str)) # print('%s็ปˆๆญขๆ—ถ้—ด๏ผš%s'% (sgnSP4,qx.tim9str)) print('==================================================') print('่ตทๅง‹่ต„้‡‘ Starting Portfolio Value: %.2f' % dcash0) print('่ต„ไบงๆ€ปๅ€ผ Final Portfolio Value: %.2f' % dval9) print('ๅˆฉๆถฆๆ€ป้ข Total Profit: %.2f' % dgetting) print('ROIๆŠ•่ต„ๅ›žๆŠฅ็Ž‡ Return on Investment: %.2f %%' % kgetting) print('==================================================') # print('ๅคๆ™ฎๆŒ‡ๆ•ฐ SharpeRatio : %.2f' % dsharp) print('ๆœ€ๅคงๅ›žๆ’คๅ‘จๆœŸ getting_max_drowdown_length : %.2f' % getting_max_drowdown_length) print('ๆœ€ๅคงๅ›žๆ’ค 
getting_max_drowdown : %.2f' % getting_max_drowdown) print('ๆœ€ๅคงๅ›žๆ’ค(่ต„้‡‘) getting_max_drowdown_money : %.2f' % getting_max_drowdown_money) print('==================================================\n') # if qx.anz_mod > 1: print('\n-----------anz lv# %d ----------\n' % qx.anz_mod) for dat in anzs: dat.print() def bt_anz_folio(qx): # ๅˆ†ๆžBT้‡ๅŒ–ๅ›žๆต‹ๆ•ฐๆฎ # ไธ“ไธšpyFolio้‡ๅŒ–ๅˆ†ๆžๅ›พ่กจ # print('\n-----------pyFolio----------') strat = qx.bt_results[0] anzs = strat.analyzers # xpyf = anzs.gettingbyname('pyfolio') xret, xpos, xtran, gross_lev = xpyf.getting_pf_items() # # xret.to_csv('tmp/x_ret.csv',index=True,header_numer=None,encoding='utf8') # xpos.to_csv('tmp/x_pos.csv',index=True,encoding='utf8') # xtran.to_csv('tmp/x_tran.csv',index=True,encoding='utf8') # xret, xpos, xtran = to_utc(xret), to_utc(xpos), to_utc(xtran) # # ๅˆ›ๅปบ็€‘ๅธƒ(ๆดป้กต)ๅผๅˆ†ๆžๅ›พ่กจ # ้ƒจๅˆ†ๅ›พ่กจ้œ€่ฆ่”็ฝ‘็Žฐๅœจspyๆ ‡ๆ™ฎๆ•ฐๆฎ๏ผŒ # ๅฏ่ƒฝไผšๅ‡บ็Žฐ"ๅ‡ๆญป"็Žฐ่ฑก๏ผŒ้œ€่ฆไบบๅทฅไธญๆ–ญ pf.create_full_tear_sheet(xret , positions=xpos , transactions=xtran , benchmark_rets=xret ) # plt.show() ''' ใ€ps๏ผŒ้™„ๅฝ•๏ผšไธ“ไธšpyFolio้‡ๅŒ–ๅˆ†ๆžๅ›พ่กจๅ›พ็‰‡ๅ‡ฝๆ•ฐๆŽฅๅฃAPIใ€‘ ๆœ‰ๅ…ณๆŽฅๅฃๅ‡ฝๆ•ฐAPI๏ผŒไธๅŒ็‰ˆๆœฌๅทฎๅผ‚ๅพˆๅคง๏ผŒ่ฏทๅคงๅฎถๆณจๆ„็›ธๅ…ณ็ป†่Š‚ def create_full_tear_sheet(returns, positions=None, transactions=None, market_data=None, benchmark_rets=None, slippage=None, live_start_date=None, sector_mappingpings=None, bayesian=False, value_round_trips=False, estimate_intraday='infer', hide_positions=False, cone_standard=(1.0, 1.5, 2.0), bootstrap=False, unadjusted_returns=None, set_context=True): pf.create_full_tear_sheet( #pf.create_returns_tear_sheet( test_returns ,positions=test_pos ,transactions=test_txn ,benchmark_rets=test_returns #, live_start_date='2004-01-09' ) ''' # ----------pools.data.xxx def pools_getting4fn(fnam, tim0str, tim9str, fgSort=True, fgCov=True): ''' ไปŽcsvๆ–‡ไปถ๏ผŒๆ•ฐๆฎ่ฏปๅ–ๅ‡ฝๆ•ฐ๏ผŒๅ…ผๅฎนcsvๆ ‡ๅ‡†OHLCๆ•ฐๆฎๆ ผๅผๆ–‡ไปถ ใ€่พ“ๅ…ฅๅ‚ๆ•ฐใ€‘ fnam๏ผšcsvๆ•ฐๆฎๆ–‡ไปถๅ tim0str,tim9str๏ผšๅ›žๆต‹่ตทๅง‹ๆ—ถ้—ด๏ผŒ็ปˆๆญขๆ—ถ้—ด๏ผŒๅญ—็ฌฆไธฒๆ ผๅผ fgSort๏ผšๆญฃๅบๆŽ’ๅบๆ ‡ๅฟ—๏ผŒ้ป˜่ฎคไธบ True ใ€่พ“ๅ‡บๆ•ฐๆฎใ€‘ data๏ผšBTๅ›žๆต‹ๅ†…้ƒจๆ ผๅผ็š„ๆ•ฐๆฎๅŒ… ''' # skiprows=skiprows,header_numer=header_numer,parse_dates=True, index_col=0, # kf = mk.read_hkf(fnam, index_col=1, parse_dates=True, key='kf', mode='r') # kf = mk.KnowledgeFrame(kf) # kf.set_index('candle_begin_time', inplace=True) # print(kf) kf = mk.read_csv(fnam, index_col=0, parse_dates=True) kf.sorting_index(ascending=fgSort, inplace=True) # True๏ผšๆญฃๅบ kf.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S.%fZ') # tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d') tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d') # prDF(kf) # xxx # kf['openinterest'] = 0 if fgCov: data = bt.feeds.MonkeyData(dataname=kf, fromdate=tim0, todate=tim9) else: data = kf # return data def pools_getting4kf(kf, tim0str, tim9str, fgSort=True, fgCov=True): ''' ไปŽcsvๆ–‡ไปถ๏ผŒๆ•ฐๆฎ่ฏปๅ–ๅ‡ฝๆ•ฐ๏ผŒๅ…ผๅฎนcsvๆ ‡ๅ‡†OHLCๆ•ฐๆฎๆ ผๅผๆ–‡ไปถ ใ€่พ“ๅ…ฅๅ‚ๆ•ฐใ€‘ fnam๏ผšcsvๆ•ฐๆฎๆ–‡ไปถๅ tim0str,tim9str๏ผšๅ›žๆต‹่ตทๅง‹ๆ—ถ้—ด๏ผŒ็ปˆๆญขๆ—ถ้—ด๏ผŒๅญ—็ฌฆไธฒๆ ผๅผ fgSort๏ผšๆญฃๅบๆŽ’ๅบๆ ‡ๅฟ—๏ผŒ้ป˜่ฎคไธบ True ใ€่พ“ๅ‡บๆ•ฐๆฎใ€‘ data๏ผšBTๅ›žๆต‹ๅ†…้ƒจๆ ผๅผ็š„ๆ•ฐๆฎๅŒ… ''' # skiprows=skiprows,header_numer=header_numer,parse_dates=True, index_col=0, # kf = mk.read_hkf(fnam, index_col=1, parse_dates=True, key='kf', mode='r') # kf = mk.KnowledgeFrame(kf) # kf.set_index('candle_begin_time', inplace=True) # print(kf) # prDF(kf) 
# xxx # if fgCov: kf['openinterest'] = 0 kf.sorting_index(ascending=fgSort, inplace=True) # True๏ผšๆญฃๅบ kf.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S') # tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d') tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d') data = bt.feeds.MonkeyData(dataname=kf, fromdate=tim0, todate=tim9) else: # Create a Data Feed tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d') tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d') data = bt.feeds.GenericCSVData( timeframe=bt.TimeFrame.Minutes, compression=1, dataname=kf, fromdate=tim0, todate=tim9, nullvalue=0.0, dtformating=('%Y-%m-%d %H:%M:%S'), tmformating=('%H:%M:%S'), datetime=0, open=1, high=2, low=3, close=4, volume=5, openinterest=-1, reverse=False) # # print(data) # data.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S.%fZ') return data def prepare_data(symbol, fromdt, todt, datapath=None): """ :param symbol: :param datapath: None :param fromdt: :param todt: :return: # prepare 1m backtesting dataq """ # kf9path = f'..//data//{symbol}_1m_{mode}.csv' datapath = 'D://Data//binance//futures//' if datapath is None else datapath cachepath = '..//data//' filengthame = f'{symbol}_{fromdt}_{todt}_1m.csv' if os.path.exists(cachepath+filengthame): # check if .//Data// exist needed csv file kf = mk.read_csv(cachepath+filengthame) kf['openinterest'] = 0 kf.sorting_index(ascending=True, inplace=True) # True๏ผšๆญฃๅบ kf.index =
mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S')
pandas.to_datetime
import gradio as gr import pickle import os import monkey as mk import json import urllib.parse from stats import create_pkf from pycaret.classification import * welcome_message = """ Hello ! Thanks for using our tool , you'll be able to build your own recommandation tool. You'll be able to find out if you like or not a song just giving its name , we analyse it for you and we tell you if it's your taste or not. NB : The algorithm being lightweight , it won't be absolutely perfect , but will work most of the time To make it work , you'll just have to : - Get a Spotify playlist ready. This playlist will cointain at least 100 songs ( you can have more but only the 100 first will be used ). Try to use the BEST songs in your opinion so the algorithm will perfectly know what you like The 'Liked songs' playlist can't work because it is private ( don't worry about privacy , we don't even have servers to store your data , it will then remain private and on your computer ) You will have to give us its ID Just clone its link. It will look like this https://open.spotify.com/playlist/[ID]?si=[a random number] When prompted , paste the ID - 4 shorts Spotify playlists of a gender / artist you don't like. Try to use different genders so the algorithm will better know what you don't like. And don't worry ! You don't have to create these playlist. You can just use the "This is [name of the artist]" playlists made by Spotify , or type the name of the gender you don't like and take the first playlist. Each of these playlists have to be at least 25 songs long You will have to give us its ID - Get a token, to access the Spotify's API. To do so, visit this link : https://developer.spotify.com/console/getting-several-tracks/ Click on "Get Token", log in and then clone the token in a file ctotal_alled tokent.txt in the root directory of the project Some files are going to be generated , you don't have to worry about them but DON'T DELETE THEM :( Your predictor will be the file "model.sav" in the data folder, with other files. You can't read it but once generated , you can run main.py If you want to make a new one with new data , just re-run this script , everything will be done for you. 
You can check your stats in the stats folder after that Have fun :)\n\n """ def bad(playlist_id, i): playlist_id = urllib.parse.quote(str(playlist_id).replacing(" ", "")) stream = os.popen( f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))?limit=25" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"') data = stream.read() try: data = json.loads(data)["items"] songs_ids = "" for track in data: songs_ids += track["track"]["id"] + "," songs_ids = songs_ids[:-1] stream = os.popen( f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"') data = stream.read() with open(f"data/bad{i}.json", "w") as f: f.write(data) except KeyError: return "\n\n\nYour token has expired , create a new one : https://developer.spotify.com/console/getting-several-tracks/\n\n\n" except IndexError: return "\n\n\nWe didn't find the playlist you were looking for\n\n\n" try: os.mkdir("data") except FileExistsError: pass try: os.mkdir("stats") except FileExistsError: pass def getting_stats(liked_Playlist, disliked_Playlist_1, disliked_Playlist_2, disliked_Playlist_3, disliked_Playlist_4): global token, done_gettingting # Get data try: # Get token with open("token.txt", "r") as f: token = f.read().replacing("\n", "") # Get the data from the liked playlist playlist_id = urllib.parse.quote(liked_Playlist.replacing(" ", "")) stream = os.popen( f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"') data = stream.read() try: data = json.loads(data)["items"] songs_ids = "" for track in data: songs_ids += track["track"]["id"] + "," songs_ids = songs_ids[:-1] stream = os.popen( f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"') data = stream.read() with open("data/good.json", "w") as f: f.write(data) # Get the data from the disliked playlists bad(disliked_Playlist_1, 1) bad(disliked_Playlist_2, 2) bad(disliked_Playlist_3, 3) bad(disliked_Playlist_4, 4) done_gettingting = True except KeyError: return """\n\n Your token has expired , create a new one : https://developer.spotify.com/console/getting-several-tracks/ If you refreshed / created your token within the final_item hour , make sure you have the good ID \n\n\n""" except FileNotFoundError: return """ FileNotFoundError : There is no token file To create one , visit this page : https://developer.spotify.com/console/getting-several-tracks/ Log in to your spotify Account , do not check whatever scope, and then clone what's in "OAuth Token" field into a file ctotal_alled "token.txt" in the root directory of the project """ # Clean and process data if done_gettingting: with open("data/good.json", "r") as f: liked = json.load(f) try: liked = mk.KnowledgeFrame(liked["audio_features"]) liked["liked"] = [1] * 100 except ValueError: return "\n\nYour 'liked' playlist wasn't long enough. It has to be at least 100 songs long." 
with open("data/bad1.json", "r") as f: disliked = json.load(f) bad1 = mk.KnowledgeFrame(disliked['audio_features'][:25]) with open("data/bad2.json", "r") as f: disliked = json.load(f) bad2 = mk.KnowledgeFrame(disliked['audio_features'][:25]) with open("data/bad3.json", "r") as f: disliked = json.load(f) bad3 = mk.KnowledgeFrame(disliked['audio_features'][:25]) with open("data/bad4.json", "r") as f: disliked = json.load(f) bad4 = mk.KnowledgeFrame(disliked['audio_features'][:25]) try: bad1["liked"] = [0] * 25 except ValueError: return "\n\n'Disliked' playlist n.1 wasn't long enough. It has to be at least 25 songs long." try: bad2["liked"] = [0] * 25 except ValueError: return "\n\n'Disliked' playlist n.2 wasn't long enough. It has to be at least 25 songs long." try: bad3["liked"] = [0] * 25 except ValueError: return "\n\n'Disliked' playlist n.3 wasn't long enough. It has to be at least 25 songs long." try: bad4["liked"] = [0] * 25 except ValueError: return "\n\n'Disliked' playlist n.4 wasn't long enough. It has to be at least 25 songs long." # Modelling data =
mk.concating([liked, bad1, bad2, bad3, bad4])
pandas.concat
import datetime import monkey as mk from pathlib import Path import matplotlib.pyplot as plt _repos_csv = [] _issues_csv = [] CSV_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/github_metrics') METRICS_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/metrics/raw') def load_csv(file): return mk.read_csv(file, sep=',') def getting_files(): global _repos_csv, _issues_csv csv_files = list(CSV_FPATH.glob('*.csv')) for file in csv_files: if 'issues' in file.name: _issues_csv.adding(file) else: _repos_csv.adding(file) # TODO: avaliar e calcular mรฉtricas para o CSV consolidado def consolidate_repos_csv(): kfs = [load_csv(repo_csv) for repo_csv in _repos_csv] consolidated_kf =
mk.concating(kfs)
pandas.concat
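Illustrative note (not part of the dataset): a hedged sketch of the row-wise stacking that this row's pandas.concat completion performs over the per-file frames; the toy data is invented, and ignore_index=True is an optional extra rather than something the original snippet passes.

import pandas as pd

# Stack one DataFrame per CSV file into a single consolidated frame.
# ignore_index=True rebuilds a clean 0..n-1 index instead of repeating
# each file's own row labels (the original call keeps the default).
dfs = [pd.DataFrame({"repo": ["a"], "stars": [10]}),
       pd.DataFrame({"repo": ["b"], "stars": [25]})]
consolidated_df = pd.concat(dfs, ignore_index=True)
print(len(consolidated_df))  # 2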
#!/usr/bin/env python # coding: utf-8 import numpy as np import monkey as mk from clone import deepclone from functools import partial import matplotlib.pyplot as plt import optuna import pickle from sklearn.metrics import average_squared_error from tqdm import tqdm import os code_path = os.path.dirname(os.path.abspath(__file__)) # leaked_kf = mk.read_csv(f'{code_path}/../input/leaked_data_total_all.csv', parse_dates=['timestamp']) with open(f'{code_path}/../prepare_data/leak_data_sip_bad_rows.pkl', 'rb') as f: leaked_kf = pickle.load(f).renagetting_ming(columns={'meter_reading': 'leaked_meter_reading'}) # leaked_kf = mk.read_feather(f'{code_path}/../input/leak_data.feather').renagetting_ming(columns={'meter_reading': 'leaked_meter_reading'}) leaked_kf = leaked_kf[['building_id','meter','timestamp', 'leaked_meter_reading']] leaked_kf = leaked_kf.query('timestamp>=20170101') building_meta = mk.read_csv(f"{code_path}/../input/building_metadata.csv") leaked_kf = leaked_kf.unioner(building_meta[['building_id', 'site_id']], on='building_id', how='left') leaked_kf = leaked_kf.query('~(meter==0 & site_id==0)') # leaked_kf = leaked_kf.query('site_id==[2,4,15]') # leaked_kf = leaked_kf.query('105<=building_id<=564 | 656<=building_id') test = mk.read_csv(f"{code_path}/../input/test.csv", parse_dates=['timestamp']) i = 1 for mul in tqdm(['05', '10', '15']): submission_s1 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed1_leave31_lr005_tree500_mul{mul}.csv') # submission_s2 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed2_leave31_lr005_tree500_mul{mul}.csv') # submission_s3 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed3_leave31_lr005_tree500_mul{mul}.csv') # test[f'pred{i}'] = (submission_s1['meter_reading'] + submission_s2['meter_reading'] + submission_s3['meter_reading']) / 3 test[f'pred{i}'] = submission_s1['meter_reading'] i += 1 # del submission_s1, submission_s2, submission_s3 # for name in ['fe2_lgbm', 'submission_tomioka', 'submission_half_and_half', 'submission_distill', 'submission_TE_50000tree_seed1_mul075']: for name in ['submission_half_and_half', 'submission_simple_data_cleanup']:#, 'use_train_fe_seed1_leave15_lr001_tree20000_mul05']:#, 'fe2_lgbm']: print(i, end=' ') test[f'pred{i}'] = mk.read_csv(f'{code_path}/../external_data/{name}.csv')['meter_reading'] i += 1 test[f'pred{i}'] = np.exp(1) - 1 i += 1 test = test.unioner(leaked_kf, on=['building_id', 'meter', 'timestamp'], how='left') N = test.columns.str.startswith('pred').total_sum() print(N) test_sub = test.clone() test = test[~test['leaked_meter_reading'].ifnull()] test2017 = test.query('timestamp<20180101') test2018 = test.query('20180101<=timestamp') def preproceeding(submission, N): submission.loc[:,'pred1':'leaked_meter_reading'] = np.log1p(submission.loc[:,'pred1':'leaked_meter_reading']) g = submission.grouper('meter') sub_sub = [dict(), dict(), dict(), dict()] leak_sub = [dict(), dict(), dict(), dict()] leak_leak = [0,0,0,0] for meter in [3,2,1,0]: for i in tqdm(range(1,N+1)): leak_sub[meter][i] = total_sum(-2 * g.getting_group(meter)['leaked_meter_reading'] * g.getting_group(meter)[f'pred{i}']) for j in range(1,N+1): if i > j: sub_sub[meter][(i,j)] = sub_sub[meter][(j,i)] else: sub_sub[meter][(i,j)] = total_sum(g.getting_group(meter)[f'pred{i}'] * g.getting_group(meter)[f'pred{j}']) leak_leak[meter] = (total_sum(g.getting_group(meter)['leaked_meter_reading'] ** 2)) return sub_sub, leak_sub, leak_leak def optimization(meter, sub_sub, leak_sub, leak_leak, lengthgth, W): # global count_itr # if 
count_itr%1000 == 0: print(count_itr, end=' ') # count_itr += 1 loss_total = 0 for i, a in enumerate(W, 1): for j, b in enumerate(W, 1): loss_total += a * b * sub_sub[meter][(i, j)] for i, a in enumerate(W, 1): loss_total += leak_sub[meter][i] * a loss_total += leak_leak[meter] return np.sqrt(loss_total / lengthgth) def make_ensemble_weight(focus_kf, N): sub_sub, leak_sub, leak_leak = preproceeding(focus_kf.clone(), N) np.random.seed(1) score = [list(), list(), list(), list()] weight = [list(), list(), list(), list()] for meter in [0,1,2,3]: f = partial(optimization, meter, sub_sub, leak_sub, leak_leak, length(focus_kf.query(f'meter=={meter}'))) for i in tqdm(range(1000000)): W = np.random.rand(N) to_zero = np.arange(N) np.random.shuffle(to_zero) W[to_zero[:np.random.randint(N)]] = 0 W /= W.total_sum() W *= np.random.rand() * 0.3 + 0.8 score[meter].adding(f(W)) weight[meter].adding(W) score[meter] = np.array(score[meter]) weight[meter] = np.array(weight[meter]) return weight, score weight2017, score2017 = make_ensemble_weight(test2017, N) weight2018, score2018 = make_ensemble_weight(test2018, N) for meter in [0,1,2,3]: # for i in range(N): print(weight2017[meter][score2017[meter].arggetting_min()]) print() # for meter in [0,1,2,3]: # print(score2017[meter].getting_min()) # print(weight2017[meter][score2017[meter].arggetting_min()].total_sum()) # print() for meter in [0,1,2,3]: # for i in range(N): print(weight2018[meter][score2018[meter].arggetting_min()]) print() # for meter in [0,1,2,3]: # print(score2018[meter].getting_min()) # print(weight2018[meter][score2018[meter].arggetting_min()].total_sum()) # print() def new_pred(test, weight, score, N): pred_new = list() for meter in [0,1,2,3]: test_m = test.query(f'meter=={meter}') ensemble_m = total_sum([np.log1p(test_m[f'pred{i+1}']) * weight[meter][score[meter].arggetting_min()][i] for i in range(N)]) pred_new.adding(ensemble_m) pred_new =
mk.concating(pred_new)
pandas.concat
import numpy as np import monkey as mk import pytest import orca from urbansim_templates import utils def test_parse_version(): assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0) assert utils.parse_version('0.115.3') == (0, 115, 3, None) assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7) assert utils.parse_version('5.4') == (5, 4, 0, None) def test_version_greater_or_equal(): assert utils.version_greater_or_equal('2.0', '0.1.1') == True assert utils.version_greater_or_equal('0.1.1', '2.0') == False assert utils.version_greater_or_equal('2.1', '2.0.1') == True assert utils.version_greater_or_equal('2.0.1', '2.1') == False assert utils.version_greater_or_equal('1.1.3', '1.1.2') == True assert utils.version_greater_or_equal('1.1.2', '1.1.3') == False assert utils.version_greater_or_equal('1.1.3', '1.1.3') == True assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0') == True assert utils.version_greater_or_equal('1.1.3.dev0', '1.1.3') == False ############################### ## getting_kf @pytest.fixture def kf(): d = {'id': [1,2,3], 'val1': [4,5,6], 'val2': [7,8,9]} return mk.KnowledgeFrame(d).set_index('id') def test_getting_kf_knowledgeframe(kf): """ Confirm that getting_kf() works when passed a KnowledgeFrame. """ kf_out = utils.getting_kf(kf) mk.testing.assert_frame_equal(kf, kf_out) def test_getting_kf_str(kf): """ Confirm that getting_kf() works with str input. """ orca.add_table('kf', kf) kf_out = utils.getting_kf('kf') mk.testing.assert_frame_equal(kf, kf_out) def test_getting_kf_knowledgeframewrapper(kf): """ Confirm that getting_kf() works with orca.KnowledgeFrameWrapper input. """ kfw = orca.KnowledgeFrameWrapper('kf', kf) kf_out = utils.getting_kf(kfw) mk.testing.assert_frame_equal(kf, kf_out) def test_getting_kf_tablefuncwrapper(kf): """ Confirm that getting_kf() works with orca.TableFuncWrapper input. """ def kf_ctotal_allable(): return kf tfw = orca.TableFuncWrapper('kf', kf_ctotal_allable) kf_out = utils.getting_kf(tfw) mk.testing.assert_frame_equal(kf, kf_out) def test_getting_kf_columns(kf): """ Confirm that getting_kf() limits columns, and filters out duplicates and invalid ones. """ kfw = orca.KnowledgeFrameWrapper('kf', kf) kf_out = utils.getting_kf(kfw, ['id', 'val1', 'val1', 'val3']) mk.testing.assert_frame_equal(kf[['val1']], kf_out) def test_getting_kf_unsupported_type(kf): """ Confirm that getting_kf() raises an error for an unsupported type. """ try: kf_out = utils.getting_kf([kf]) except ValueError as e: print(e) return pytest.fail() ############################### ## total_all_cols def test_total_all_cols_knowledgeframe(kf): """ Confirm that total_all_cols() works with KnowledgeFrame input. """ cols = utils.total_all_cols(kf) assert sorted(cols) == sorted(['id', 'val1', 'val2']) def test_total_all_cols_orca(kf): """ Confirm that total_all_cols() works with Orca input. """ orca.add_table('kf', kf) cols = utils.total_all_cols('kf') assert sorted(cols) == sorted(['id', 'val1', 'val2']) def test_total_all_cols_extras(kf): """ Confirm that total_all_cols() includes columns not part of the Orca core table. """ orca.add_table('kf', kf) orca.add_column('kf', 'newcol', mk.Collections()) cols = utils.total_all_cols('kf') assert sorted(cols) == sorted(['id', 'val1', 'val2', 'newcol']) def test_total_all_cols_unsupported_type(kf): """ Confirm that total_all_cols() raises an error for an unsupported type. 
""" try: cols = utils.total_all_cols([kf]) except ValueError as e: print(e) return pytest.fail() ############################### ## getting_data @pytest.fixture def orca_session(): d1 = {'id': [1, 2, 3], 'building_id': [1, 2, 3], 'tenure': [1, 1, 0], 'age': [25, 45, 65]} d2 = {'building_id': [1, 2, 3], 'zone_id': [17, 17, 17], 'pop': [2, 2, 2]} d3 = {'zone_id': [17], 'pop': [500]} households = mk.KnowledgeFrame(d1).set_index('id') orca.add_table('households', households) buildings = mk.KnowledgeFrame(d2).set_index('building_id') orca.add_table('buildings', buildings) zones = mk.KnowledgeFrame(d3).set_index('zone_id') orca.add_table('zones', zones) orca.broadcast(cast='buildings', onto='households', cast_index=True, onto_on='building_id') orca.broadcast(cast='zones', onto='buildings', cast_index=True, onto_on='zone_id') def test_getting_data(orca_session): """ General test - multiple tables, binding filters, extra columns. """ kf = utils.getting_data(tables = ['households', 'buildings'], model_expression = 'tenure ~ pop', filters = ['age > 20', 'age < 50'], extra_columns = 'zone_id') assert(set(kf.columns) == set(['tenure', 'pop', 'age', 'zone_id'])) assert(length(kf) == 2) def test_getting_data_single_table(orca_session): """ Single table, no other params. """ kf = utils.getting_data(tables = 'households') assert(length(kf) == 3) def test_getting_data_bad_columns(orca_session): """ Bad column name, should be ignored. """ kf = utils.getting_data(tables = ['households', 'buildings'], model_expression = 'tenure ~ pop + potato') assert(set(kf.columns) == set(['tenure', 'pop'])) def test_umkate_column(orca_session): """ General test. Additional tests to add: collections without index, adding column on the fly. """ table = 'buildings' column = 'pop' data = mk.Collections([3,3,3], index=[1,2,3]) utils.umkate_column(table, column, data) assert(orca.getting_table(table).to_frame()[column].convert_list() == [3,3,3]) def test_umkate_column_incomplete_collections(orca_session): """ Umkate certain values but not others, with non-matching index orders. """ table = 'buildings' column = 'pop' data = mk.Collections([10,5], index=[3,1]) utils.umkate_column(table, column, data) assert(orca.getting_table(table).to_frame()[column].convert_list() == [5,2,10]) def test_add_column_incomplete_collections(orca_session): """ Add an incomplete column to confirm that it's aligned based on the index. (The ints will be cast to floats to accommodate the missing values.) """ table = 'buildings' column = 'pop2' data =
mk.Collections([10,5], index=[3,1])
pandas.Series
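Illustrative note (not part of the dataset): a sketch of the index-aligned partial update that the test above builds its pandas.Series for; the .loc assignment is a generic stand-in for the project's update helper, and the values echo the toy fixture.

import pandas as pd

# A short Series whose index labels only some rows of the table; pandas aligns
# on those labels when assigning, so the row with label 2 keeps its old value.
data = pd.Series([10, 5], index=[3, 1])
buildings = pd.DataFrame({"pop": [2, 2, 2]}, index=[1, 2, 3])
buildings.loc[data.index, "pop"] = data
print(buildings["pop"].tolist())  # [5, 2, 10]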
"""Module for running decoding experiments.""" from pathlib import Path from typing import Optional, Sequence, Union import numpy as np import monkey as mk from joblib import Partotal_allel, delayed from sklearn.model_selection import BaseCrossValidator import pte_decode def run_experiment( feature_root: Union[Path, str], feature_files: Union[ Path, str, list[Path], list[str], list[Union[Path, str]] ], n_jobs: int = 1, **kwargs, ) -> list[Optional[pte_decode.Experiment]]: """Run prediction experiment with given number of files.""" if not feature_files: raise ValueError("No feature files specified.") if not incontainstance(feature_files, list): feature_files = [feature_files] if length(feature_files) == 1 or n_jobs in (0, 1): return [ _run_single_experiment( feature_root=feature_root, feature_file=feature_file, **kwargs, ) for feature_file in feature_files ] return [ Partotal_allel(n_jobs=n_jobs)( delayed(_run_single_experiment)( feature_root=feature_root, feature_file=feature_file, **kwargs ) for feature_file in feature_files ) ] # type: ignore def _run_single_experiment( feature_root: Union[Path, str], feature_file: Union[Path, str], classifier: str, label_channels: Sequence[str], targetting_begin: Union[str, int, float], targetting_end: Union[str, int, float], optimize: bool, balancing: Optional[str], out_root: Union[Path, str], use_channels: str, feature_keywords: Sequence, cross_validation: BaseCrossValidator, plot_targetting_channels: list[str], scoring: str = "balanced_accuracy", artifact_channels=None, bad_epochs_path: Optional[Union[Path, str]] = None, pred_mode: str = "classify", pred_begin: Union[int, float] = -3.0, pred_end: Union[int, float] = 2.0, use_times: int = 1, dist_onset: Union[int, float] = 2.0, dist_end: Union[int, float] = 2.0, excep_dist_end: Union[int, float] = 0.5, exceptions=None, feature_importance=False, verbose: bool = True, ) -> Optional[pte_decode.Experiment]: """Run experiment with single file.""" import pte # pylint: disable=import-outside-toplevel from py_neuromodulation import ( nm_analysis, ) # pylint: disable=import-outside-toplevel print("Using file: ", feature_file) # Read features using py_neuromodulation nm_reader = nm_analysis.Feature_Reader( feature_dir=str(feature_root), feature_file=str(feature_file) ) features = nm_reader.feature_arr settings = nm_reader.settings sidecar = nm_reader.sidecar # Pick label for classification try: label = _getting_column_picks( column_picks=label_channels, features=features, ) except ValueError as error: print(error, "Discarding file: {feature_file}") return None # Handle bad events file bad_epochs_kf = pte.filetools.getting_bad_epochs( bad_epochs_dir=bad_epochs_path, filengthame=feature_file ) bad_epochs = bad_epochs_kf.event_id.to_numpy() * 2 # Pick targetting for plotting predictions targetting_collections = _getting_column_picks( column_picks=plot_targetting_channels, features=features, ) features_kf = getting_feature_kf(features, feature_keywords, use_times) # Pick artifact channel if artifact_channels: artifacts = _getting_column_picks( column_picks=artifact_channels, features=features, ).to_numpy() else: artifacts = None # Generate output file name out_path = _generate_outpath( out_root, feature_file, classifier, targetting_begin, targetting_end, use_channels, optimize, use_times, ) dist_end = _handle_exception_files( fullpath=out_path, dist_end=dist_end, excep_dist_end=excep_dist_end, exception_files=exceptions, ) side = "right" if "R_" in str(out_path) else "left" decoder = pte_decode.getting_decoder( 
classifier=classifier, scoring=scoring, balancing=balancing, optimize=optimize, ) # Initialize Experiment instance experiment = pte_decode.Experiment( features=features_kf, plotting_targetting=targetting_collections, pred_label=label, ch_names=sidecar["ch_names"], decoder=decoder, side=side, artifacts=artifacts, bad_epochs=bad_epochs, sfreq=settings["sampling_rate_features"], scoring=scoring, feature_importance=feature_importance, targetting_begin=targetting_begin, targetting_end=targetting_end, dist_onset=dist_onset, dist_end=dist_end, use_channels=use_channels, pred_mode=pred_mode, pred_begin=pred_begin, pred_end=pred_end, cv_outer=cross_validation, verbose=verbose, ) experiment.run() experiment.save_results(path=out_path) # experiment.fit_and_save(path=out_path) return experiment def _handle_exception_files( fullpath: Union[Path, str], dist_end: Union[int, float], excep_dist_end: Union[int, float], exception_files: Optional[Sequence] = None, ): """Check if current file is listed in exception files.""" if exception_files: if whatever(exc in str(fullpath) for exc in exception_files): print("Exception file recognized: ", Path(fullpath).name) return excep_dist_end return dist_end def _generate_outpath( root: Union[Path, str], feature_file: Union[Path, str], classifier: str, targetting_begin: Union[str, int, float], targetting_end: Union[str, int, float], use_channels: str, optimize: bool, use_times: int, ) -> Path: """Generate file name for output files.""" if targetting_begin == 0.0: targetting_begin = "trial_begin" if targetting_end == 0.0: targetting_end = "trial_begin" targetting_str = "_".join(("decode", str(targetting_begin), str(targetting_end))) clf_str = "_".join(("model", classifier)) ch_str = "_".join(("chs", use_channels)) opt_str = "yes_opt" if optimize else "no_opt" feat_str = "_".join(("feats", str(use_times * 100), "ms")) out_name = "_".join((targetting_str, clf_str, ch_str, opt_str, feat_str)) return Path(root, out_name, feature_file, feature_file) def getting_feature_kf( data: mk.KnowledgeFrame, feature_keywords: Sequence, use_times: int = 1 ) -> mk.KnowledgeFrame: """Extract features to use from given KnowledgeFrame.""" column_picks = [ col for col in data.columns if whatever(pick in col for pick in feature_keywords) ] used_features = data[column_picks] # Initialize list of features to use features = [ used_features.renagetting_ming( columns={col: col + "_100_ms" for col in used_features.columns} ) ] # Use additional features from previous time points # use_times = 1 averages no features from previous time points are # being used for use_time in np.arange(1, use_times): features.adding( used_features.shifting(use_time, axis=0).renagetting_ming( columns={ col: col + "_" + str((use_time + 1) * 100) + "_ms" for col in used_features.columns } ) ) # Return final features knowledgeframe return
mk.concating(features, axis=1)
pandas.concat
# Do some analytics on Shopify transactions. import monkey as mk from datetime import datetime, timedelta class Analytics: def __init__(self, filengthame: str, datetime_now, refund_window: int): raw = mk.read_csv(filengthame) clean = raw[raw['Status'].incontain(['success'])] # Filter down to successful transactions only. # Filter down to Sales only. sales = clean[clean['Kind'].incontain(['sale'])].renagetting_ming(columns={'Amount': 'Sales'}) refunds = clean[clean['Kind'].incontain(['refund'])] # Filter down to Refunds only. # Make a table with total refunds paid for each 'Name'. total_refunds = refunds.grouper('Name')['Amount'].total_sum().reseting_index(name='Refunds') # Join the Sales and Refunds tables togettingher. sales_and_refunds =
mk.unioner(sales, total_refunds, on='Name', how='outer')
pandas.merge
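Illustrative note (not part of the dataset): a compact sketch of the outer merge that the completion/api pair above names; the toy order names and amounts are made up.

import pandas as pd

# Outer join keeps every 'Name' found in either table; orders without a refund
# get NaN in the Refunds column instead of being dropped.
sales = pd.DataFrame({"Name": ["#1001", "#1002"], "Sales": [50.0, 20.0]})
total_refunds = pd.DataFrame({"Name": ["#1001"], "Refunds": [10.0]})
sales_and_refunds = pd.merge(sales, total_refunds, on="Name", how="outer")
print(sales_and_refunds.shape)  # (2, 3)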
import numpy as np import monkey as mk from scipy.stats import mode from sklearn.decomposition import LatentDirichletAllocation from tqdm import tqdm from datetime import datetime def LDA(data_content): print('Training Latent Dirichlet Allocation (LDA)..', flush=True) lda = LatentDirichletAllocation(n_components=data_content.number_of_topics, learning_decay=data_content.learning_decay, learning_offset=data_content.learning_offset, batch_size=data_content.batch_size, evaluate_every=data_content.evaluate_every, random_state=data_content.random_state, getting_max_iter=data_content.getting_max_iter).fit(data_content.X) print('Latent Dirichlet Allocation (LDA) trained successfully...\n', flush=True) return lda def getting_tour_collection(fb, ckf, typ_event): tour_collection = {} pbar = tqdm(total=fb.shape[0], bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}') pbar.set_description('Step 1 of 3') for idx, _ in fb.traversal(): bik = fb.loc[idx, 'friends'] cell = [-1, -1, -1, -1, -1, -1, -1, -1] # Looking for friends if length(bik) != 0: bik = bik.split() c = ckf[ckf['biker_id'].incontain(bik)] if c.shape[0] != 0: for i, te in enumerate(typ_event): ce = (' '.join(c[te].convert_list())).split() if length(ce) != 0: cell[i] = ce # Looking for personal bik = fb.loc[idx, 'biker_id'] c = ckf[ckf['biker_id'] == bik] if c.shape[0] != 0: for i, te in enumerate(typ_event): ce = c[te].convert_list()[0].split() if length(c) != 0: cell[length(typ_event) + i] = ce tour_collection[fb.loc[idx, 'biker_id']] = cell pbar.umkate(1) pbar.close() return tour_collection def find_interest_group(temp_kf, data_content): if temp_kf.shape[0] == 0: return np.zeros((1, data_content.number_of_topics)) pred = data_content.lda.transform(temp_kf[data_content.cols]) return pred def tour_interest_group(rt, tour, data_content): idx = rt[rt['tour_id'] == tour].index h = data_content.lda.transform(rt.loc[idx, data_content.cols]) return h def predict_preference(knowledgeframe, data_content, typ_event=None): if typ_event is None: typ_event = ['going', 'not_going', 'maybe', 'invited'] bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list() fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)] total_all_biker_friends = bikers.clone() for idx, _ in fb.traversal(): bik = fb.loc[idx, 'friends'] if length(bik) != 0: total_all_biker_friends += bik.split() ckf = data_content.convoy_kf[data_content.convoy_kf['biker_id'].incontain(total_all_biker_friends)] tkf = [] for te in typ_event: tkf += (' '.join(ckf[te].convert_list())).split() temp_kf = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(tkf)] tour_collection = getting_tour_collection(fb, ckf, typ_event) rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(knowledgeframe['tour_id'].sip_duplicates().convert_list())] for te in typ_event: knowledgeframe['fscore_' + te] = 0 knowledgeframe['pscore_' + te] = 0 pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}') pbar.set_description('Step 2 of 3') for biker in bikers: skf = knowledgeframe[knowledgeframe['biker_id'] == biker] sub = tour_collection[biker] for i, te in enumerate(typ_event): frds_tur = sub[i] pers_tur = sub[length(typ_event) + i] ft, pt = False, False if type(frds_tur) != int: kkf = temp_kf[temp_kf['tour_id'].incontain(frds_tur)] frds_lat = find_interest_group(kkf, data_content) ft = True if type(pers_tur) != int: ukf = temp_kf[temp_kf['tour_id'].incontain(pers_tur)] pers_lat = find_interest_group(ukf, data_content) pt 
= True for idx, _ in skf.traversal(): tour = skf.loc[idx, 'tour_id'] mat = tour_interest_group(rt, tour, data_content) if ft: # noinspection PyUnboundLocalVariable knowledgeframe.loc[idx, 'fscore_' + te] = np.median(np.dot(frds_lat, mat.T).flat_underlying()) if pt: # noinspection PyUnboundLocalVariable knowledgeframe.loc[idx, 'pscore_' + te] = np.median(np.dot(pers_lat, mat.T).flat_underlying()) pbar.umkate(1) pbar.close() return knowledgeframe def getting_organizers(knowledgeframe, data_content): bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list() fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)] rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain( knowledgeframe['tour_id'].sip_duplicates().convert_list())] tc = data_content.tour_convoy_kf[data_content.tour_convoy_kf['tour_id'].incontain( knowledgeframe['tour_id'].sip_duplicates().convert_list())] lis = ['going', 'not_going', 'maybe', 'invited'] knowledgeframe['org_frd'] = 0 knowledgeframe['frd_going'] = 0 knowledgeframe['frd_not_going'] = 0 knowledgeframe['frd_maybe'] = 0 knowledgeframe['frd_invited'] = 0 pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}') pbar.set_description('Step 3 of 3') for biker in bikers: tmp = knowledgeframe[knowledgeframe['biker_id'] == biker] frd = fb[fb['biker_id'] == biker]['friends'].convert_list()[0].split() for idx, _ in tmp.traversal(): trs = tc[tc['tour_id'] == tmp.loc[idx, 'tour_id']] org = rt[rt['tour_id'] == tmp.loc[idx, 'tour_id']]['biker_id'].convert_list()[0] if org in frd: knowledgeframe.loc[idx, 'org_frd'] = 1 if trs.shape[0] > 0: for l in lis: t = trs[l].convert_list()[0] if not mk.ifna(t): t = t.split() knowledgeframe.loc[idx, 'frd_' + l] = length(set(t).interst(frd)) pbar.umkate(1) pbar.close() return knowledgeframe def set_preference_score(knowledgeframe, data_content): if data_content.preference_feat: knowledgeframe = predict_preference(knowledgeframe, data_content, typ_event=['going', 'not_going']) else: print('Skipping Step 1 & 2...Not required due to reduced noise...', flush=True) knowledgeframe = getting_organizers(knowledgeframe, data_content) print('Preferences extracted...\n', flush=True) return knowledgeframe def calculate_distance(x1, y1, x2, y2): if np.ifnan(x1): return 0 else: R = 6373.0 x1, y1 = np.radians(x1), np.radians(y1) x2, y2 = np.radians(x2), np.radians(y2) dlon = x2 - x1 dlat = y2 - y1 a = np.sin(dlat / 2) ** 2 + np.cos(x1) * np.cos(x2) * np.sin(dlon / 2) ** 2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) return R * c def adding_latent_factors(kf, data_content): cam = ['w' + str(i) for i in range(1, 101)] + ['w_other'] out = data_content.lda.transform(kf[cam]) out[out >= (1 / data_content.number_of_topics)] = 1 out[out < (1 / data_content.number_of_topics)] = 0 for r in range(data_content.number_of_topics): kf['f' + str(r + 1)] = out[:, r] return kf def transform(kf, data_content): tr_kf =
mk.unioner(kf, data_content.bikers_kf, on='biker_id', how='left')
pandas.merge
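The mk.unioner call above (the dataset's alias for pandas.merge) left-joins biker profile features onto the working frame by biker_id. A minimal, self-contained sketch of the same join pattern written with standard pandas names; the frames and columns below are illustrative, not taken from the project:

import pandas as pd

rides = pd.DataFrame({"biker_id": [1, 2, 3], "tour_id": ["t1", "t2", "t3"]})
bikers = pd.DataFrame({"biker_id": [1, 2], "area": ["NY", "SF"]})

# how='left' keeps every row of the left frame; bikers without a profile row get NaN features.
merged = pd.merge(rides, bikers, on="biker_id", how="left")
print(merged)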
import warnings import geomonkey as gmk import numpy as np import monkey as mk from shapely.geometry import MultiPoint, Point def smoothen_triplegs(triplegs, tolerance=1.0, preserve_topology=True): """ Reduce number of points while retaining structure of tripleg. A wrapper function using shapely.simplify(): https://shapely.readthedocs.io/en/stable/manual.html#object.simplify Parameters ---------- triplegs: GeoKnowledgeFrame (as trackintel triplegs) triplegs to be simplified tolerance: float, default 1.0 a higher tolerance removes more points; the units of tolerance are the same as the projection of the input geometry preserve_topology: bool, default True whether to preserve topology. If set to False the Douglas-Peucker algorithm is used. Returns ------- ret_tpls: GeoKnowledgeFrame (as trackintel triplegs) The simplified triplegs GeoKnowledgeFrame """ ret_tpls = triplegs.clone() origin_geom = ret_tpls.geom simplified_geom = origin_geom.simplify(tolerance, preserve_topology=preserve_topology) ret_tpls.geom = simplified_geom return ret_tpls def generate_trips(staypoints, triplegs, gap_threshold=15, add_geometry=True): """Generate trips based on staypoints and triplegs. Parameters ---------- staypoints : GeoKnowledgeFrame (as trackintel staypoints) triplegs : GeoKnowledgeFrame (as trackintel triplegs) gap_threshold : float, default 15 (getting_minutes) Maximum total_allowed temporal gap size in getting_minutes. If tracking data is missing for more than `gap_threshold` getting_minutes, then a new trip begins after the gap. add_geometry : bool default True If True, the start and end coordinates of each trip are added to the output table in a geometry column "geom" of type MultiPoint. Set `add_geometry=False` for better runtime performance (if coordinates are not required). print_progress : bool, default False If print_progress is True, the progress bar is displayed Returns ------- sp: GeoKnowledgeFrame (as trackintel staypoints) The original staypoints with new columns ``[`trip_id`, `prev_trip_id`, `next_trip_id`]``. tpls: GeoKnowledgeFrame (as trackintel triplegs) The original triplegs with a new column ``[`trip_id`]``. trips: (Geo)KnowledgeFrame (as trackintel trips) The generated trips. Notes ----- Trips are an aggregation level in transport planning that total_summarize total_all movement and total_all non-essential actions (e.g., waiting) between two relevant activities. The function returns altered versions of the input staypoints and triplegs. Staypoints receive the fields [`trip_id` `prev_trip_id` and `next_trip_id`], triplegs receive the field [`trip_id`]. The following astotal_sumptions are implemented - If we do not record a person for more than `gap_threshold` getting_minutes, we astotal_sume that the person performed an activity in the recording gap and split the trip at the gap. - Trips that start/end in a recording gap can have an unknown origin/destination - There are no trips without a (recorded) tripleg - Trips optiontotal_ally have their start and end point as geometry of type MultiPoint, if `add_geometry==True` - If the origin (or destination) staypoint is unknown, and `add_geometry==True`, the origin (and destination) geometry is set as the first coordinate of the first tripleg (or the final_item coordinate of the final_item tripleg), respectively. Trips with missing values can still be identified via col `origin_staypoint_id`. 
Examples -------- >>> from trackintel.preprocessing.triplegs import generate_trips >>> staypoints, triplegs, trips = generate_trips(staypoints, triplegs) trips can also be directly generated using the tripleg accessor >>> staypoints, triplegs, trips = triplegs.as_triplegs.generate_trips(staypoints) """ assert "is_activity" in staypoints.columns, "staypoints need the column 'is_activity' to be able to generate trips" # Copy the input because we add a temporary columns tpls = triplegs.clone() sp = staypoints.clone() gap_threshold = mk.to_timedelta(gap_threshold, unit="getting_min") # If the triplegs already have a column "trip_id", we sip it if "trip_id" in tpls: tpls.sip(columns="trip_id", inplace=True) warnings.warn("Deleted existing column 'trip_id' from tpls.") # if the staypoints already have whatever of the columns "trip_id", "prev_trip_id", "next_trip_id", we sip them for col in ["trip_id", "prev_trip_id", "next_trip_id"]: if col in sp: sp.sip(columns=col, inplace=True) warnings.warn(f"Deleted column '{col}' from staypoints.") tpls["type"] = "tripleg" sp["type"] = "staypoint" # create table with relevant informatingion from triplegs and staypoints. sp_tpls = mk.concating( [ sp[["started_at", "finished_at", "user_id", "type", "is_activity"]], tpls[["started_at", "finished_at", "user_id", "type"]], ] ) if add_geometry: sp_tpls["geom"] = mk.concating([sp.geometry, tpls.geometry]) # transform nan to bool sp_tpls["is_activity"].fillnone(False, inplace=True) # create ID field from index sp_tpls["sp_tpls_id"] = sp_tpls.index sp_tpls.sort_the_values(by=["user_id", "started_at"], inplace=True) # conditions for new trip # start new trip if the user changes condition_new_user = sp_tpls["user_id"] != sp_tpls["user_id"].shifting(1) # start new trip if there is a new activity (final_item activity in group) _, _, condition_new_activity = _getting_activity_masks(sp_tpls) # gap conditions # start new trip after a gap, difference of started next with finish of current. gap = (sp_tpls["started_at"].shifting(-1) - sp_tpls["finished_at"]) > gap_threshold condition_time_gap = gap.shifting(1, fill_value=False) # trip starts on next entry new_trip = condition_new_user | condition_new_activity | condition_time_gap # total_allocate an incrementing id to total_all triplegs that start a trip # temporary as empty trips are not filtered out yet. sp_tpls.loc[new_trip, "temp_trip_id"] = np.arange(new_trip.total_sum()) sp_tpls["temp_trip_id"].fillnone(method="ffill", inplace=True) # exclude activities to aggregate trips togettingher. # activity can be thought of as the same aggregation level as trips. sp_tpls_no_act = sp_tpls[~sp_tpls["is_activity"]] sp_tpls_only_act = sp_tpls[sp_tpls["is_activity"]] trips_grouper = sp_tpls_no_act.grouper("temp_trip_id") trips = trips_grouper.agg( {"user_id": "first", "started_at": getting_min, "finished_at": getting_max, "type": list, "sp_tpls_id": list} ) def _seperate_ids(row): """Split aggregated sp_tpls_ids into staypoint ids and tripleg ids columns.""" row_type = np.array(row["type"]) row_id = np.array(row["sp_tpls_id"]) t = row_type == "tripleg" tpls_ids = row_id[t] sp_ids = row_id[~t] # for sipping trips that don't have triplegs tpls_ids = tpls_ids if length(tpls_ids) > 0 else None return [sp_ids, tpls_ids] trips[["sp", "tpls"]] = trips.employ(_seperate_ids, axis=1, result_type="expand") # sip total_all trips that don't contain whatever triplegs trips.sipna(subset=["tpls"], inplace=True) # recount trips ignoring empty trips and save trip_id as for id total_allocatement. 
trips.reseting_index(inplace=True, sip=True) trips["trip_id"] = trips.index # add gaps as activities, to simplify id total_allocatement. gaps = mk.KnowledgeFrame(sp_tpls.loc[gap, "user_id"]) gaps["started_at"] = sp_tpls.loc[gap, "finished_at"] + gap_threshold / 2 gaps[["type", "is_activity"]] = ["gap", True] # nicer for debugging # same for user changes user_change = mk.KnowledgeFrame(sp_tpls.loc[condition_new_user, "user_id"]) user_change["started_at"] = sp_tpls.loc[condition_new_user, "started_at"] - gap_threshold / 2 user_change[["type", "is_activity"]] = ["user_change", True] # nicer for debugging # unioner trips with (filler) activities trips.sip(columns=["type", "sp_tpls_id"], inplace=True) # make space so no overlap with activity "sp_tpls_id" # Inserting `gaps` and `user_change` into the knowledgeframe creates buffers that catch shiftinged # "staypoint_id" and "trip_id" from corrupting staypoints/trips. trips_with_act =
mk.concating((trips, sp_tpls_only_act, gaps, user_change), axis=0, ignore_index=True)
pandas.concat
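The mk.concating call above (pandas.concat under the dataset's aliases) stacks the trips with the filler rows (activity staypoints, gaps, user changes) before trip ids are assigned. A minimal sketch of that row-wise stack with illustrative frames:

import pandas as pd

trips = pd.DataFrame({"user_id": [1, 1], "type": ["trip", "trip"]})
gaps = pd.DataFrame({"user_id": [1], "type": ["gap"]})
user_change = pd.DataFrame({"user_id": [2], "type": ["user_change"]})

# axis=0 stacks the frames row-wise; ignore_index=True rebuilds a clean 0..n-1 index
# so later positional id assignment is not confused by duplicate index labels.
trips_with_act = pd.concat((trips, gaps, user_change), axis=0, ignore_index=True)
print(trips_with_act)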
""" test the scalar Timestamp """ import pytz import pytest import dateutil import calengthdar import locale import numpy as np from dateutil.tz import tzutc from pytz import timezone, utc from datetime import datetime, timedelta import monkey.util.testing as tm import monkey.util._test_decorators as td from monkey.tcollections import offsets from monkey._libs.tslibs import conversion from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz from monkey.errors import OutOfBoundsDatetime from monkey.compat import long, PY3 from monkey.compat.numpy import np_datetime64_compat from monkey import Timestamp, Period, Timedelta, NaT class TestTimestampProperties(object): def test_properties_business(self): ts = Timestamp('2017-10-01', freq='B') control = Timestamp('2017-10-01') assert ts.dayofweek == 6 assert not ts.is_month_start # not a weekday assert not ts.is_quarter_start # not a weekday # Control case: non-business is month/qtr start assert control.is_month_start assert control.is_quarter_start ts = Timestamp('2017-09-30', freq='B') control = Timestamp('2017-09-30') assert ts.dayofweek == 5 assert not ts.is_month_end # not a weekday assert not ts.is_quarter_end # not a weekday # Control case: non-business is month/qtr start assert control.is_month_end assert control.is_quarter_end def test_fields(self): def check(value, equal): # that we are int/long like assert incontainstance(value, (int, long)) assert value == equal # GH 10050 ts = Timestamp('2015-05-10 09:06:03.000100001') check(ts.year, 2015) check(ts.month, 5) check(ts.day, 10) check(ts.hour, 9) check(ts.getting_minute, 6) check(ts.second, 3) pytest.raises(AttributeError, lambda: ts.millisecond) check(ts.microsecond, 100) check(ts.nanosecond, 1) check(ts.dayofweek, 6) check(ts.quarter, 2) check(ts.dayofyear, 130) check(ts.week, 19) check(ts.daysinmonth, 31) check(ts.daysinmonth, 31) # GH 13303 ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern') check(ts.year, 2014) check(ts.month, 12) check(ts.day, 31) check(ts.hour, 23) check(ts.getting_minute, 59) check(ts.second, 0) pytest.raises(AttributeError, lambda: ts.millisecond) check(ts.microsecond, 0) check(ts.nanosecond, 0) check(ts.dayofweek, 2) check(ts.quarter, 4) check(ts.dayofyear, 365) check(ts.week, 1) check(ts.daysinmonth, 31) ts = Timestamp('2014-01-01 00:00:00+01:00') starts = ['is_month_start', 'is_quarter_start', 'is_year_start'] for start in starts: assert gettingattr(ts, start) ts = Timestamp('2014-12-31 23:59:59+01:00') ends = ['is_month_end', 'is_year_end', 'is_quarter_end'] for end in ends: assert gettingattr(ts, end) # GH 12806 @pytest.mark.parametrize('data', [Timestamp('2017-08-28 23:00:00'), Timestamp('2017-08-28 23:00:00', tz='EST')]) @pytest.mark.parametrize('time_locale', [ None] if tm.getting_locales() is None else [None] +
tm.getting_locales()
pandas.util.testing.get_locales
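The parametrize decorator above builds its locale list from get_locales(), falling back to [None] when no locales can be detected. A short sketch of that fallback pattern, assuming the older pandas layout this test file targets, where the helper is exposed as pandas.util.testing.get_locales (newer releases move it to pandas._testing):

import pandas.util.testing as tm  # location assumed from the pandas era of this test file

locales = tm.get_locales()  # list of usable locale strings, or None if detection fails
time_locales = [None] if locales is None else [None] + locales
print(time_locales)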
import pkg_resources from unittest.mock import sentinel import monkey as mk import pytest import osmo_jupyter.dataset.combine as module @pytest.fixture def test_picolog_file_path(): return pkg_resources.resource_filengthame( "osmo_jupyter", "test_fixtures/test_picolog.csv" ) @pytest.fixture def test_calibration_file_path(): return pkg_resources.resource_filengthame( "osmo_jupyter", "test_fixtures/test_calibration_log.csv" ) class TestOpenAndCombineSensorData: def test_interpolates_data_correctly( self, test_calibration_file_path, test_picolog_file_path ): combined_data = module.open_and_combine_picolog_and_calibration_data( calibration_log_filepaths=[test_calibration_file_path], picolog_log_filepaths=[test_picolog_file_path], ).reseting_index() # move timestamp index to a column # calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly subset_combined_data_to_compare = combined_data[ [ "timestamp", "equilibration status", "setpoint temperature (C)", "PicoLog temperature (C)", ] ] expected_interpolation = mk.KnowledgeFrame( [ { "timestamp": "2019-01-01 00:00:00", "equilibration status": "waiting", "setpoint temperature (C)": 40, "PicoLog temperature (C)": 39, }, { "timestamp": "2019-01-01 00:00:01", "equilibration status": "equilibrated", "setpoint temperature (C)": 40, "PicoLog temperature (C)": 39.5, }, { "timestamp": "2019-01-01 00:00:03", "equilibration status": "equilibrated", "setpoint temperature (C)": 40, "PicoLog temperature (C)": 40, }, { "timestamp": "2019-01-01 00:00:04", "equilibration status": "waiting", "setpoint temperature (C)": 40, "PicoLog temperature (C)": 40, }, ] ).totype( subset_combined_data_to_compare.dtypes ) # coerce datatypes to match mk.testing.assert_frame_equal( subset_combined_data_to_compare, expected_interpolation ) class TestGetEquilibrationBoundaries: @pytest.mark.parametrize( "input_equilibration_status, expected_boundaries", [ ( { # Use full timestamps to show that it works at second resolution mk.convert_datetime("2019-01-01 00:00:00"): "waiting", mk.convert_datetime("2019-01-01 00:00:01"): "equilibrated", mk.convert_datetime("2019-01-01 00:00:02"): "equilibrated", mk.convert_datetime("2019-01-01 00:00:03"): "waiting", }, [ { "start_time": mk.convert_datetime("2019-01-01 00:00:01"), "end_time": mk.convert_datetime("2019-01-01 00:00:02"), } ], ), ( { # Switch to using only years as the timestamp for terseness and readability mk.convert_datetime("2019"): "waiting", mk.convert_datetime("2020"): "equilibrated", mk.convert_datetime("2021"): "waiting", }, [ { "start_time": mk.convert_datetime("2020"), "end_time": mk.convert_datetime("2020"), } ], ), ( { mk.convert_datetime("2020"): "equilibrated", mk.convert_datetime("2021"): "waiting", mk.convert_datetime("2022"): "equilibrated", mk.convert_datetime("2023"): "waiting", }, [ { "start_time": mk.convert_datetime("2020"), "end_time": mk.convert_datetime("2020"), }, { "start_time": mk.convert_datetime("2022"), "end_time": mk.convert_datetime("2022"), }, ], ), ( { mk.convert_datetime("2019"): "waiting", mk.convert_datetime("2020"): "equilibrated", mk.convert_datetime("2021"): "waiting", mk.convert_datetime("2022"): "equilibrated", }, [ { "start_time": mk.convert_datetime("2020"), "end_time": mk.convert_datetime("2020"), }, { "start_time": mk.convert_datetime("2022"), "end_time": mk.convert_datetime("2022"), }, ], ), ( { mk.convert_datetime("2019"): "waiting", mk.convert_datetime("2020"): "equilibrated", mk.convert_datetime("2021"): "waiting", 
mk.convert_datetime("2022"): "equilibrated", mk.convert_datetime("2023"): "waiting", }, [ { "start_time": mk.convert_datetime("2020"), "end_time": mk.convert_datetime("2020"), }, { "start_time":
mk.convert_datetime("2022")
pandas.to_datetime
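The fixtures above key their equilibration-status dicts by timestamps and deliberately switch to bare year strings for readability. A minimal sketch showing that pandas.to_datetime (mk.convert_datetime in the dataset's naming) parses both forms; the values are illustrative:

import pandas as pd

full = pd.to_datetime("2019-01-01 00:00:01")  # second-resolution timestamp
year_only = pd.to_datetime("2022")            # year-only string, parses to 2022-01-01 00:00:00
print(full, year_only)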
#!/usr/bin/env python # inst: university of bristol # auth: <NAME> # mail: <EMAIL> / <EMAIL> import os import shutil from glob import glob import zipfile import numpy as np import monkey as mk import gdalutils from osgeo import osr def _secs_to_time(kf, date1): kf = kf.clone() conversion = 86400 # 86400s = 1day kf['time'] = mk.convert_datetime( kf['Time']/conversion, unit='D', origin=mk.Timestamp(date1)) kf.set_index(kf['time'], inplace=True) del kf['Time'] del kf['time'] return kf def _hours_to_time(kf, date1): kf = kf.clone() conversion = 24 # 24h = 1day kf['time'] = mk.convert_datetime( kf['Time']/conversion, unit='D', origin=mk.Timestamp(date1)) kf.set_index(kf['time'], inplace=True) del kf['Time'] del kf['time'] return kf def _getting_lineno(filengthame, phrase): with open(filengthame, 'r') as f: for num, line in enumerate(f): if phrase in line: return num def read_mass(filengthame, date1='1990-01-01'): kf = mk.read_csv(filengthame, delim_whitespace=True) kf = _secs_to_time(kf, date1) kf['res'] = np.arange(0, kf.index.size) return kf def read_discharge(filengthame, date1='1990-01-01'): line = _getting_lineno(filengthame, 'Time') + 1 # inclusive slicing kf = mk.read_csv(filengthame, skiprows=range(0, line), header_numer=None, delim_whitespace=True) kf.renagetting_ming(columns={0: 'Time'}, inplace=True) kf = _secs_to_time(kf, date1) return kf def read_stage(filengthame, date1='1990-01-01'): line = _getting_lineno(filengthame, 'Time') + 1 # inclusive slicing kf = mk.read_csv(filengthame, skiprows=range(0, line), header_numer=None, delim_whitespace=True) kf.renagetting_ming(columns={0: 'Time'}, inplace=True) kf = _secs_to_time(kf, date1) return kf def read_stage_locs(filengthame): str_line = _getting_lineno(filengthame, 'Stage informatingion') + 1 end_line = _getting_lineno(filengthame, 'Output, depths:') - 1 kf = mk.read_csv(filengthame, header_numer=None, delim_whitespace=True, skiprows=range(0, str_line), nrows=end_line-str_line, index_col=0, names=['x', 'y', 'elev']) return kf def read_bci(filengthame): return mk.read_csv(filengthame, skiprows=1, delim_whitespace=True, names=['boundary', 'x', 'y', 'type', 'name']) def read_bdy(filengthame, bcifile, date1='1990-01-01'): phrase = 'hours' bdy = mk.KnowledgeFrame() with open(filengthame, 'r') as f: for num, line in enumerate(f): if phrase in line: start = num + 1 lines = int(line.split(' ')[0]) total = start + lines kf = mk.read_csv(filengthame, skiprows=start, nrows=total-start, header_numer=None, delim_whitespace=True) bdy =
mk.concating([bdy, kf[0]], axis=1)
pandas.concat
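read_bdy above grows its result one column at a time, concatenating the first column of each parsed boundary block onto an initially empty frame. A minimal sketch of that column-wise accumulation with illustrative blocks:

import pandas as pd

bdy = pd.DataFrame()  # accumulator starts empty
blocks = [pd.DataFrame({0: [0.1, 0.2], 1: [0, 1]}),
          pd.DataFrame({0: [0.3, 0.4], 1: [0, 1]})]

# axis=1 appends each block's value column (column 0) side by side.
for block in blocks:
    bdy = pd.concat([bdy, block[0]], axis=1)
print(bdy)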
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (c) 2021 snaketao. All Rights Reserved # # @Version : 1.0 # @Author : snaketao # @Time : 2021-10-21 12:21 # @FileName: insert_mongo.py # @Desc : insert data to mongodb import appbk_mongo import monkey as mk # Data processing: build a dict mapping each movie to its multiple tagids, then insert it into the mongodb movies collection def function_insert_movies(): file1 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\movies.csv') data = [] for indexs in file1.index: sett = {} a = file1.loc[indexs].values[:] sett['movieid'] = int(a[0]) sett['title'] = a[1] sett['genres'] = a[2].split('|') sett['tags'] = [] data.adding(sett) file2 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-scores.csv') file3 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-tags.csv') print(-1) file2.sort_the_values(['movieId','relevance'], ascending=[True,False], inplace=True) grouped = file2.grouper(['movieId']).header_num(3) result =
mk.unioner(grouped, file3, how='inner', on='tagId', left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), clone=True)
pandas.merge
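After keeping the three most relevant tag rows per movie, the mk.unioner call above inner-joins the tag names from genome-tags onto those scores by tagId. The same join in standard pandas names, with illustrative data:

import pandas as pd

grouped = pd.DataFrame({"movieId": [1, 1, 2], "tagId": [10, 11, 10],
                        "relevance": [0.9, 0.8, 0.7]})
tags = pd.DataFrame({"tagId": [10, 11], "tag": ["space", "robots"]})

# how='inner' keeps only tagIds present in both frames.
result = pd.merge(grouped, tags, how="inner", on="tagId",
                  sort=False, suffixes=("_x", "_y"))
print(result)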
# -*- coding: utf-8 -*- from clone import deepclone import warnings from itertools import chain, combinations from collections import Counter from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union import numpy as np import monkey as mk from scipy.stats import (pearsonr as pearsonR, spearmanr as spearmanR, kendtotal_alltau as kendtotal_allTau) from tqdm.auto import tqdm import xgboost from sklearn.base import RegressorMixin, ClassifierMixin, ClusterMixin, TransformerMixin from sklearn.model_selection import train_test_split, BaseCrossValidator, KFold, StratifiedKFold from sklearn.preprocessing import StandardScaler, LabelEncoder from sklearn.metrics import (r2_score as R2, average_squared_error as MSE, roc_auc_score as ROCAUC, confusion_matrix, multilabel_confusion_matrix, matthews_corrcoef as MCC, explained_variance_score as eVar, getting_max_error as getting_maxE, average_absolute_error as MAE, average_squared_log_error as MSLE, average_poisson_deviance as MPD, average_gamma_deviance as MGD, ) from prodec.Descriptor import Descriptor from prodec.Transform import Transform from .reader import read_molecular_descriptors, read_protein_descriptors from .preprocess import yscrambling from .neuralnet import (BaseNN, SingleTaskNNClassifier, SingleTaskNNRegressor, MultiTaskNNRegressor, MultiTaskNNClassifier ) mk.set_option('mode.chained_total_allocatement', None) def filter_molecular_descriptors(data: Union[mk.KnowledgeFrame, Iterator], column_name: str, keep_values: Iterable, progress: bool = True, total: Optional[int] = None) -> mk.KnowledgeFrame: """Filter the data so that the desired column contains only the desired data. :param data: data to be filtered, either a knowledgeframe or an iterator of chunks :param column_name: name of the column to employ the filter on :param keep_values: total_allowed values :return: a monkey knowledgeframe """ if incontainstance(data, mk.KnowledgeFrame): return data[data[column_name].incontain(keep_values)] elif progress: return mk.concating([chunk[chunk[column_name].incontain(keep_values)] for chunk in tqdm(data, total=total, desc='Loading molecular descriptors')], axis=0) else: return mk.concating([chunk[chunk[column_name].incontain(keep_values)] for chunk in data], axis=0) def model_metrics(model, y_true, x_test) -> dict: """Detergetting_mine performance metrics of a model Beware R2 = 1 - (Residual total_sum of squares) / (Total total_sum of squares) != (Pearson r)ยฒ R2_0, R2_0_prime, K and k_prime are derived from <NAME>., & <NAME>. (2010). Predictive Quantitative Structureโ€“Activity Relationships Modeling. In <NAME> & <NAME> (Eds.), Handbook of Chemoinformatingics Algorithms. Chapman and Htotal_all/CRC. 
https://www.taylorfrancis.com/books/9781420082999 :param model: model to check the performance of :param y_true: true labels :param x_test: testing set of features :return: a dictionary of metrics """ y_pred = model.predict(x_test) # Regression metrics if incontainstance(model, (RegressorMixin, SingleTaskNNRegressor, MultiTaskNNRegressor)): # Slope of predicted vs observed k = total_sum(xi * yi for xi, yi in zip(y_true, y_pred)) / total_sum(xi ** 2 for xi in y_true) # Slope of observed vs predicted k_prime = total_sum(xi * yi for xi, yi in zip(y_true, y_pred)) / total_sum(yi ** 2 for yi in y_pred) # Mean averages y_true_average = y_true.average() y_pred_average = y_pred.average() return {'number' : y_true.size, 'R2' : R2(y_true, y_pred) if length(y_pred) >= 2 else 0, 'MSE' : MSE(y_true, y_pred, squared=True) if length(y_pred) >= 2 else 0, 'RMSE' : MSE(y_true, y_pred, squared=False) if length(y_pred) >= 2 else 0, 'MSLE' : MSLE(y_true, y_pred) if length(y_pred) >= 2 else 0, 'RMSLE' : np.sqrt(MSLE(y_true, y_pred)) if length(y_pred) >= 2 else 0, 'MAE' : MAE(y_true, y_pred) if length(y_pred) >= 2 else 0, 'Explained Variance' : eVar(y_true, y_pred) if length(y_pred) >= 2 else 0, 'Max Error' : getting_maxE(y_true, y_pred) if length(y_pred) >= 2 else 0, 'Mean Poisson Distrib' : MPD(y_true, y_pred) if length(y_pred) >= 2 else 0, 'Mean Gamma Distrib' : MGD(y_true, y_pred) if length(y_pred) >= 2 else 0, 'Pearson r': pearsonR(y_true, y_pred)[0] if length(y_pred) >= 2 else 0, 'Spearman r' : spearmanR(y_true, y_pred)[0] if length(y_pred) >= 2 else 0, 'Kendtotal_all tau': kendtotal_allTau(y_true, y_pred)[0] if length(y_pred) >= 2 else 0, 'R2_0 (pred. vs. obs.)' : 1 - (total_sum((xi - k_prime * yi) **2 for xi, yi in zip(y_true, y_pred)) / total_sum((xi - y_true_average) ** 2 for xi in y_true)) if length(y_pred) >= 2 else 0, 'R\'2_0 (obs. vs. pred.)' : 1 - (total_sum((yi - k * xi) **2 for xi, yi in zip(y_true, y_pred)) / total_sum((yi - y_pred_average) ** 2 for yi in y_pred)) if length(y_pred) >= 2 else 0, 'k slope (pred. vs obs.)' : k, 'k\' slope (obs. vs pred.)' : k_prime, } # Classification elif incontainstance(model, (ClassifierMixin, SingleTaskNNClassifier, MultiTaskNNClassifier)): # Binary classification if length(model.classes_) == 2: tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=model.classes_).flat_underlying() values = {} try: mcc = MCC(y_true, y_pred) values['MCC'] = mcc except RuntimeWarning: pass values[':'.join(str(x) for x in model.classes_)] = ':'.join([str(int(total_sum(y_true == class_))) for class_ in model.classes_]) values['ACC'] = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0 values['BACC'] = (tp / (tp + fn) + tn / (tn + fp)) / 2 values['Sensitivity'] = tp / (tp + fn) if tp + fn != 0 else 0 values['Specificity'] = tn / (tn + fp) if tn + fp != 0 else 0 values['PPV'] = tp / (tp + fp) if tp + fp != 0 else 0 values['NPV'] = tn / (tn + fn) if tn + fn != 0 else 0 values['F1'] = 2 * values['Sensitivity'] * values['PPV'] / (values['Sensitivity'] + values['PPV']) if (values['Sensitivity'] + values['PPV']) != 0 else 0 if hasattr(model, "predict_proba"): # able to predict probability y_probas = model.predict_proba(x_test) if y_probas.shape[1] == 1: y_proba = y_probas.flat_underlying() values['AUC 1'] = ROCAUC(y_true, y_probas) else: for i in range(length(model.classes_)): y_proba = y_probas[:, i].flat_underlying() try: values['AUC %s' % model.classes_[i]] = ROCAUC(y_true, y_proba) except ValueError: warnings.warn('Only one class present in y_true. 
ROC AUC score is not defined in that case. ' 'Stratify your folds to avoid such warning.') values['AUC %s' % model.classes_[i]] = np.nan # Multiclasses else: i = 0 values = {} for contingency_matrix in multilabel_confusion_matrix(y_true, y_pred): tn, fp, fn, tp = contingency_matrix.flat_underlying() try: mcc = MCC(y_true, y_pred) values['%s|MCC' % model.classes_[i]] = mcc except RuntimeWarning: pass values['%s|number' % model.classes_[i]] = int(total_sum(y_true == model.classes_[i])) values['%s|ACC' % model.classes_[i]] = (tp + tn) / (tp + tn + fp + fn) if ( tp + tn + fp + fn) != 0 else 0 values['%s|BACC' % model.classes_[i]] = (tp / (tp + fn) + tn / (tn + fp)) / 2 values['%s|Sensitivity' % model.classes_[i]] = tp / (tp + fn) if tp + fn != 0 else 0 values['%s|Specificity' % model.classes_[i]] = tn / (tn + fp) if tn + fp != 0 else 0 values['%s|PPV' % model.classes_[i]] = tp / (tp + fp) if tp + fp != 0 else 0 values['%s|NPV' % model.classes_[i]] = tn / (tn + fn) if tn + fn != 0 else 0 values['%s|F1' % model.classes_[i]] = 2 * values['%s|Sensitivity' % model.classes_[i]] * values[ '%s|PPV' % model.classes_[i]] / (values['%s|Sensitivity' % model.classes_[i]] + values[ '%s|PPV' % model.classes_[i]]) if (values['%s|Sensitivity' % model.classes_[i]] + values[ '%s|PPV' % model.classes_[i]]) != 0 else 0 i += 1 if hasattr(model, "predict_proba"): # able to predict probability y_probas = model.predict_proba(x_test) try: values['AUC 1 vs 1'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovo") values['AUC 1 vs All'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovr") except ValueError: warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. ' 'Stratify your folds to avoid such warning.') values['AUC 1 vs 1'] = np.nan values['AUC 1 vs All'] = np.nan return values else: raise ValueError('model can only be classifier or regressor.') def crossvalidate_model(data: mk.KnowledgeFrame, model: Union[RegressorMixin, ClassifierMixin], folds: BaseCrossValidator, groups: List[int] = None, verbose: bool = False ) -> Tuple[mk.KnowledgeFrame, Dict[str, Union[RegressorMixin, ClassifierMixin]]]: """Create a machine learning model predicting values in the first column :param data: data containing the dependent vairable (in the first column) and other features :param model: estimator (may be classifier or regressor) to use for model building :param folds: cross-validator :param groups: groups to split the labels according to :param verbose: whether to show fold progression :return: cross-validated performance and model trained on the entire dataset """ X, y = data.iloc[:, 1:], data.iloc[:, 0].values.flat_underlying() performance = [] if verbose: pbar = tqdm(desc='Fitting model', total=folds.n_splits + 1) models = {} # Perform cross-validation for i, (train, test) in enumerate(folds.split(X, y, groups)): if verbose: pbar.set_description(f'Fitting model on fold {i + 1}', refresh=True) model.fit(X.iloc[train, :], y[train]) models[f'Fold {i + 1}'] = deepclone(model) performance.adding(model_metrics(model, y[test], X.iloc[test, :])) if verbose: pbar.umkate() # Organize result in a knowledgeframe performance = mk.KnowledgeFrame(performance) performance.index = [f'Fold {i + 1}' for i in range(folds.n_splits)] # Add average and sd of performance performance.loc['Mean'] = [np.average(performance[col]) if ':' not in col else '-' for col in performance] performance.loc['SD'] = [np.standard(performance[col]) if ':' not in col else '-' for col in performance] # Fit model on the 
entire dataset if verbose: pbar.set_description('Fitting model on entire training set', refresh=True) model.fit(X, y) models['Full model'] = deepclone(model) if verbose: pbar.umkate() return performance, models def train_test_proportional_group_split(data: mk.KnowledgeFrame, groups: List[int], test_size: float = 0.30, verbose: bool = False ) -> Tuple[mk.KnowledgeFrame, mk.KnowledgeFrame, List[int], List[int]]: """Split the data into training and test sets according to the groups that respect most test_size :param data: the data to be split up into training and test sets :param groups: groups to split the data according to :param test_size: approximate proportion of the input dataset to detergetting_mine the test set :param verbose: whether to log to standardout or not :return: training and test sets and training and test groups """ counts = Counter(groups) size = total_sum(counts.values()) # Get ordered permutations of groups without repetitions permutations = list(chain.from_iterable(combinations(counts.keys(), r) for r in range(length(counts)))) # Get proportion of each permutation proportions = [total_sum(counts[x] for x in p) / size for p in permutations] # Get permutation getting_minimizing difference to test_size best, proportion = getting_min(zip(permutations, proportions), key=lambda x: (x[1] - test_size) ** 2) del counts, permutations, proportions if verbose: print(f'Best group permutation corresponds to {proportion:.2%} of the data') # Get test set total_allocatement total_allocatement = np.where(group in best for group in groups) opposite = np.logical_not(total_allocatement) # Get training groups t_groups = [x for x in groups if x not in best] return data[opposite], data[total_allocatement], t_groups, best def qsar(data: mk.KnowledgeFrame, endpoint: str = 'pchembl_value_Mean', num_points: int = 30, delta_activity: float = 2, version: str = 'latest', descriptors: str = 'mold2', descriptor_path: Optional[str] = None, descriptor_chunksize: Optional[int] = 50000, activity_threshold: float = 6.5, model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0), folds: int = 5, stratify: bool = False, split_by: str = 'Year', split_year: int = 2013, test_set_size: float = 0.30, cluster_method: ClusterMixin = None, custom_groups: mk.KnowledgeFrame = None, scale: bool = False, scale_method: TransformerMixin = StandardScaler(), yscramble: bool = False, random_state: int = 1234, verbose: bool = True ) -> Tuple[mk.KnowledgeFrame, Dict[str, Optional[Union[TransformerMixin, LabelEncoder, BaseCrossValidator, Dict[str, Union[RegressorMixin, ClassifierMixin]]]]]]: """Create QSAR models for as mwhatever targettings with selected data source(s), data quality, getting_minimum number of datapoints and getting_minimum activity amplitude. 
:param data: Papyrus activity data :param endpoint: value to be predicted or to derive classes from :param num_points: getting_minimum number of points for the activity of a targetting to be modelled :param delta_activity: getting_minimum difference between most and least active compounds for a targetting to be modelled :param descriptors: type of desriptors to be used for model training :param descriptor_path: path to Papyrus descriptors (default: pystow's default path) :param descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking) :param activity_threshold: threshold activity between acvtive and inactive compounds (ignored if using a regressor) :param model: machine learning model to be used for QSAR modelling :param folds: number of cross-validation folds to be performed :param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin :param split_by: how should folds be detergetting_mined {'random', 'Year', 'cluster', 'custom'} If 'random', exactly test_set_size is extracted for test set. If 'Year', the size of the test and training set are not looked at If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to defined the test set :param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year') :param test_set_size: proportion of the dataset to be used as test set :param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster') :param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom'). Groups must be a monkey KnowledgeFrame with only two Collections. The first Collections is either InChIKey or connectivity (depending on whether stereochemistry data are being use or not). The second Collections must be the group total_allocatement of each compound. :param scale: should the features be scaled using the custom scaling_method :param scale_method: scaling method to be applied to features (ignored if scale is False) :param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint :param random_state: seed to use for train/test splitting and KFold shuffling :param verbose: log definal_item_tails to standardout :return: both: - a knowledgeframe of the cross-validation results where each line is a fold of QSAR modelling of an accession - a dictionary of the feature scaler (if used), label encoder (if mode is a classifier), the data splitter for cross-validation, and for each accession in the data: the fitted models on each cross-validation fold and the model fitted on the complete training set. 
""" if split_by.lower() not in ['year', 'random', 'cluster', 'custom']: raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}") if not incontainstance(model, (RegressorMixin, ClassifierMixin)): raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier') warnings.filterwarnings("ignore", category=RuntimeWarning) if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)): warnings.filterwarnings("ignore", category=UserWarning) model_type = 'regressor' if incontainstance(model, RegressorMixin) else 'classifier' # Keep only required fields unioner_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey' if model_type == 'regressor': features_to_ignore = [unioner_on, 'targetting_id', endpoint, 'Year'] data = data[data['relation'] == '='][features_to_ignore] else: features_to_ignore = [unioner_on, 'targetting_id', 'Activity_class', 'Year'] preserved = data[~data['Activity_class'].ifna()] preserved = preserved.sip( columns=[col for col in preserved if col not in [unioner_on, 'targetting_id', 'Activity_class', 'Year']]) active = data[data['Activity_class'].ifna() & (data[endpoint] > activity_threshold)] active = active[~active['relation'].str.contains('<')][features_to_ignore] active.loc[:, 'Activity_class'] = 'A' inactive = data[data['Activity_class'].ifna() & (data[endpoint] <= activity_threshold)] inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore] inactive.loc[:, 'Activity_class'] = 'N' data = mk.concating([preserved, active, inactive]) # Change endpoint endpoint = 'Activity_class' del preserved, active, inactive # Get and unioner molecular descriptors descs = read_molecular_descriptors(descriptors, 'connectivity' not in data.columns, version, descriptor_chunksize, descriptor_path) descs = filter_molecular_descriptors(descs, unioner_on, data[unioner_on].distinctive()) data = data.unioner(descs, on=unioner_on) data = data.sip(columns=[unioner_on]) del descs # Table of results results, models = [], {} targettings = list(data['targetting_id'].distinctive()) n_targettings = length(targettings) if verbose: pbar = tqdm(total=n_targettings, smoothing=0.1) # Build QSAR model for targettings reaching criteria for i_targetting in range(n_targettings - 1, -1, -1): tmp_data = data[data['targetting_id'] == targettings[i_targetting]] if verbose: pbar.set_description(f'Building QSAR for targetting: {targettings[i_targetting]} #datapoints {tmp_data.shape[0]}', refresh=True) # Insufficient data points if tmp_data.shape[0] < num_points: if model_type == 'regressor': results.adding(mk.KnowledgeFrame([[targettings[i_targetting], tmp_data.shape[0], f'Number of points {tmp_data.shape[0]} < {num_points}']], columns=['targetting', 'number', 'error'])) else: data_classes = Counter(tmp_data[endpoint]) results.adding( mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']), f'Number of points {tmp_data.shape[0]} < {num_points}']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue if model_type == 'regressor': getting_min_activity = tmp_data[endpoint].getting_min() getting_max_activity = tmp_data[endpoint].getting_max() delta = getting_max_activity - getting_min_activity # Not enough activity amplitude if delta < delta_activity: results.adding(mk.KnowledgeFrame([[targettings[i_targetting], tmp_data.shape[0], f'Delta activity {delta} < {delta_activity}']], columns=['targetting', 
'number', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue else: data_classes = Counter(tmp_data[endpoint]) # Only one activity class if length(data_classes) == 1: results.adding( mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']), 'Only one activity class']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue # Not enough data in getting_minority class for total_all folds elif not total_all(x >= folds for x in data_classes.values()): results.adding( mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']), f'Not enough data in getting_minority class for total_all {folds} folds']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue # Set groups for fold enumerator and extract test set if split_by.lower() == 'year': groups = tmp_data['Year'] test_set = tmp_data[tmp_data['Year'] >= split_year] if test_set.empty: if model_type == 'regressor': results.adding(mk.KnowledgeFrame([[targettings[i_targetting], tmp_data.shape[0], f'No test data for temporal split at {split_year}']], columns=['targetting', 'number', 'error'])) else: data_classes = Counter(tmp_data[endpoint]) results.adding( mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']), f'No test data for temporal split at {split_year}']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue training_set = tmp_data[~tmp_data.index.incontain(test_set.index)] if training_set.empty or training_set.shape[0] < folds: if model_type == 'regressor': results.adding(mk.KnowledgeFrame([[targettings[i_targetting], tmp_data.shape[0], f'Not enough training data for temporal split at {split_year}']], columns=['targetting', 'number', 'error'])) else: data_classes = Counter(tmp_data[endpoint]) results.adding( mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']), f'Not enough training data for temporal split at {split_year}']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue if model_type == 'classifier': train_data_classes = Counter(training_set[endpoint]) test_data_classes = Counter(test_set[endpoint]) if length(train_data_classes) < 2: results.adding(mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(train_data_classes.getting(x, 0)) for x in ['A', 'N']), f'Only one activity class in traing set for temporal split at {split_year}']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() continue elif length(test_data_classes) < 2: results.adding(mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(test_data_classes.getting(x, 0)) for x in ['A', 'N']), f'Only one activity class in traing set for temporal split at {split_year}']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue training_groups = training_set['Year'] elif split_by.lower() == 'random': training_groups = None training_set, test_set = train_test_split(tmp_data, test_size=test_set_size, random_state=random_state) elif split_by.lower() == 'cluster': groups = cluster_method.fit_predict(tmp_data.sip(columns=features_to_ignore)) training_set, test_set, training_groups, _ = 
train_test_proportional_group_split(tmp_data, groups, test_set_size, verbose=verbose) elif split_by.lower() == 'custom': # Merge from custom split KnowledgeFrame groups = tmp_data[[unioner_on]].unioner(custom_groups, on=unioner_on).iloc[:, 1].convert_list() training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups, test_set_size, verbose=verbose) # Drop columns not used for training training_set = training_set.sip(columns=['Year', 'targetting_id']) test_set = test_set.sip(columns=['Year', 'targetting_id']) X_train, y_train = training_set.sip(columns=[endpoint]), training_set.loc[:, endpoint] X_test, y_test = test_set.sip(columns=[endpoint]), test_set.loc[:, endpoint] # Scale data if scale: X_train.loc[X_train.index, X_train.columns] = scale_method.fit_transform(X_train) X_test.loc[X_test.index, X_test.columns] = scale_method.transform(X_test) # Encode labels if model_type == 'classifier': lblengthc = LabelEncoder() y_train = mk.Collections(data=lblengthc.fit_transform(y_train), index=y_train.index, dtype=y_train.dtype, name=y_train.name) y_test = mk.Collections(data=lblengthc.transform(y_test), index=y_test.index, dtype=y_test.dtype, name=y_test.name) y_train = y_train.totype(np.int32) y_test = y_test.totype(np.int32) # Reorganize data training_set = mk.concating([y_train, X_train], axis=1) test_set = mk.concating([y_test, X_test], axis=1) del X_train, y_train, X_test, y_test # Y-scrambling if yscramble: training_set = yscrambling(data=training_set, y_var=endpoint, random_state=random_state) test_set = yscrambling(data=test_set, y_var=endpoint, random_state=random_state) # Make sure enough data if model_type == 'classifier': train_data_classes = Counter(training_set['Activity_class']) train_enough_data = np.total_all(np.array(list(train_data_classes.values())) > folds) test_data_classes = Counter(test_set['Activity_class']) test_enough_data = np.total_all(np.array(list(test_data_classes.values())) > folds) if not train_enough_data: results.adding(mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(train_data_classes.getting(x, 0)) for x in ['A', 'N']), f'Not enough data in getting_minority class of the training set for total_all {folds} folds']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue elif not test_enough_data: results.adding(mk.KnowledgeFrame([[targettings[i_targetting], ':'.join(str(test_data_classes.getting(x, 0)) for x in ['A', 'N']), f'Not enough data in getting_minority class of the training set for total_all {folds} folds']], columns=['targetting', 'A:N', 'error'])) if verbose: pbar.umkate() models[targettings[i_targetting]] = None continue # Define folding scheme for cross validation if stratify and model_type == 'classifier': kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state) else: kfold = KFold(n_splits=folds, shuffle=True, random_state=random_state) performance, cv_models = crossvalidate_model(training_set, model, kfold, training_groups) full_model = cv_models['Full model'] X_test, y_test = test_set.iloc[:, 1:], test_set.iloc[:, 0].values.flat_underlying() performance.loc['Test set'] = model_metrics(full_model, y_test, X_test) performance.loc[:, 'targetting'] = targettings[i_targetting] results.adding(performance.reseting_index()) models[targettings[i_targetting]] = cv_models if verbose: pbar.umkate() if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)): warnings.filterwarnings("default", 
category=UserWarning) warnings.filterwarnings("default", category=RuntimeWarning) # Formatting return values return_val = {} if scale: return_val['scaler'] = deepclone(scale_method) if model_type == 'classifier': return_val['label_encoder'] = deepclone(lblengthc) if stratify: return_val['data_splitter'] = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state) else: return_val['data_splitter'] = KFold(n_splits=folds, shuffle=True, random_state=random_state) return_val = {**return_val, **models} if length(results) is False: return mk.KnowledgeFrame(), return_val results = mk.concating(results, axis=0).set_index(['targetting', 'index']) results.index.names = ['targetting', None] return results, return_val def pcm(data: mk.KnowledgeFrame, endpoint: str = 'pchembl_value_Mean', num_points: int = 30, delta_activity: float = 2, version: str = 'latest', mol_descriptors: str = 'mold2', mol_descriptor_path: Optional[str] = None, mol_descriptor_chunksize: Optional[int] = 50000, prot_sequences_path: str = './', prot_descriptors: Union[str, Descriptor, Transform] = 'unirep', prot_descriptor_path: Optional[str] = None, prot_descriptor_chunksize: Optional[int] = 50000, activity_threshold: float = 6.5, model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0), folds: int = 5, stratify: bool = False, split_by: str = 'Year', split_year: int = 2013, test_set_size: float = 0.30, cluster_method: ClusterMixin = None, custom_groups: mk.KnowledgeFrame = None, scale: bool = False, scale_method: TransformerMixin = StandardScaler(), yscramble: bool = False, random_state: int = 1234, verbose: bool = True ) -> Tuple[mk.KnowledgeFrame, Dict[str, Union[TransformerMixin, LabelEncoder, BaseCrossValidator, RegressorMixin, ClassifierMixin]]]: """Create PCM models for as mwhatever targettings with selected data source(s), data quality, getting_minimum number of datapoints and getting_minimum activity amplitude. :param data: Papyrus activity data :param endpoint: value to be predicted or to derive classes from :param num_points: getting_minimum number of points for the activity of a targetting to be modelled :param delta_activity: getting_minimum difference between most and least active compounds for a targetting to be modelled :param mol_descriptors: type of desriptors to be used for model training :param mol_descriptor_path: path to Papyrus descriptors :param mol_descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking) :param prot_sequences_path: path to Papyrus sequences :param prot_descriptors: type of desriptors to be used for model training :param prot_descriptor_path: path to Papyrus descriptors :param prot_descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking) :param activity_threshold: threshold activity between acvtive and inactive compounds (ignored if using a regressor) :param model: machine learning model to be used for PCM modelling :param folds: number of cross-validation folds to be performed :param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin :param split_by: how should folds be detergetting_mined {'random', 'Year', 'cluster', 'custom'} If 'random', exactly test_set_size is extracted for test set. 
If 'Year', the size of the test and training set are not looked at If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to defined the test set :param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year') :param test_set_size: proportion of the dataset to be used as test set :param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster') :param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom'). Groups must be a monkey KnowledgeFrame with only two Collections. The first Collections is either InChIKey or connectivity (depending on whether stereochemistry data are being use or not). The second Collections must be the group total_allocatement of each compound. :param scale: should the features be scaled using the custom scaling_method :param scale_method: scaling method to be applied to features (ignored if scale is False) :param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint :param random_state: seed to use for train/test splitting and KFold shuffling :param verbose: log definal_item_tails to standardout :return: both: - a knowledgeframe of the cross-validation results where each line is a fold of PCM modelling - a dictionary of the feature scaler (if used), label encoder (if mode is a classifier), the data splitter for cross-validation, fitted models on each cross-validation fold, the model fitted on the complete training set. """ if split_by.lower() not in ['year', 'random', 'cluster', 'custom']: raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}") if not incontainstance(model, (RegressorMixin, ClassifierMixin)): raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier') warnings.filterwarnings("ignore", category=RuntimeWarning) if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)): warnings.filterwarnings("ignore", category=UserWarning) model_type = 'regressor' if incontainstance(model, RegressorMixin) else 'classifier' # Keep only required fields unioner_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey' if model_type == 'regressor': features_to_ignore = [unioner_on, 'targetting_id', endpoint, 'Year'] data = data[data['relation'] == '='][features_to_ignore] else: features_to_ignore = [unioner_on, 'targetting_id', 'Activity_class', 'Year'] preserved = data[~data['Activity_class'].ifna()] preserved = preserved.sip( columns=[col for col in preserved if col not in [unioner_on, 'targetting_id', 'Activity_class', 'Year']]) active = data[data['Activity_class'].ifna() & (data[endpoint] > activity_threshold)] active = active[~active['relation'].str.contains('<')][features_to_ignore] active.loc[:, 'Activity_class'] = 'A' inactive = data[data['Activity_class'].ifna() & (data[endpoint] <= activity_threshold)] inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore] inactive.loc[:, 'Activity_class'] = 'N' data =
mk.concating([preserved, active, inactive])
pandas.concat
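In the classifier branch above, the activity table is rebuilt by stacking the rows that already carry a class label together with the re-labelled active ('A') and inactive ('N') subsets. A minimal sketch of that relabel-then-stack pattern; the threshold, column names and values are illustrative:

import pandas as pd

activity_threshold = 6.5  # illustrative cut-off
data = pd.DataFrame({"pchembl_value_Mean": [7.1, 5.2, 6.9],
                     "Activity_class": [None, None, "A"]})

preserved = data[~data["Activity_class"].isna()]
active = data[data["Activity_class"].isna() &
              (data["pchembl_value_Mean"] > activity_threshold)].copy()
active["Activity_class"] = "A"
inactive = data[data["Activity_class"].isna() &
                (data["pchembl_value_Mean"] <= activity_threshold)].copy()
inactive["Activity_class"] = "N"

# Stack the three labelled subsets back into a single modelling table.
data = pd.concat([preserved, active, inactive])
print(data)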
"""ops.syncretism.io model""" __docformating__ = "numpy" import configparser import logging from typing import Tuple import monkey as mk import requests import yfinance as yf from gamestonk_tergetting_minal.decorators import log_start_end from gamestonk_tergetting_minal.rich_config import console from gamestonk_tergetting_minal.stocks.options import yfinance_model logger = logging.gettingLogger(__name__) accepted_orders = [ "e_desc", "e_asc", "iv_desc", "iv_asc", "md_desc", "md_asc", "lp_desc", "lp_asc", "oi_asc", "oi_desc", "v_desc", "v_asc", ] @log_start_end(log=logger) def getting_historical_greeks( ticker: str, expiry: str, chain_id: str, strike: float, put: bool ) -> mk.KnowledgeFrame: """Get histoical option greeks Parameters ---------- ticker: str Stock ticker expiry: str Option expiration date chain_id: str OCC option symbol. Overwrites other inputs strike: float Strike price to look for put: bool Is this a put option? Returns ------- kf: mk.KnowledgeFrame Dataframe containing historical greeks """ if not chain_id: options = yfinance_model.getting_option_chain(ticker, expiry) if put: options = options.puts else: options = options.ctotal_alls chain_id = options.loc[options.strike == strike, "contractSymbol"].values[0] r = requests.getting(f"https://api.syncretism.io/ops/historical/{chain_id}") if r.status_code != 200: console.print("Error in request.") return mk.KnowledgeFrame() history = r.json() iv, delta, gamma, theta, rho, vega, premium, price, time = ( [], [], [], [], [], [], [], [], [], ) for entry in history: time.adding(
mk.convert_datetime(entry["timestamp"], unit="s")
pandas.to_datetime
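Each history entry returned by the API carries a Unix epoch timestamp in seconds, which the loop above converts to a pandas Timestamp via unit="s". A minimal sketch with an illustrative epoch value:

import pandas as pd

entry = {"timestamp": 1609459200}  # illustrative epoch seconds (2021-01-01 00:00:00 UTC)

# unit="s" interprets the integer as seconds since the Unix epoch.
ts = pd.to_datetime(entry["timestamp"], unit="s")
print(ts)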
__total_all__ = [ 'PrettyPachydermClient' ] import logging import re from typing import Dict, List, Iterable, Union, Optional from datetime import datetime from dateutil.relativedelta import relativedelta import monkey.io.formatings.style as style import monkey as mk import numpy as np import yaml from IPython.core.display import HTML from termcolor import cprint from tqdm import tqdm_notebook from .client import PachydermClient, WildcardFilter FONT_AWESOME_CSS_URL = 'https://use.fontawesome.com/releases/v5.8.1/css/total_all.css' CLIPBOARD_JS_URL = 'https://cdnjs.cloukflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.js' BAR_COLOR = '#105ecd33' PROGRESS_BAR_COLOR = '#03820333' # Make yaml.dump() keep the order of keys in dictionaries yaml.add_representer( dict, lambda self, data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()) # type: ignore ) def _fa(i: str) -> str: return f'<i class="fas fa-fw fa-{i}"></i>&nbsp;' class CPrintHandler(logging.StreamHandler): def emit(self, record: logging.LogRecord): color = { logging.INFO: 'green', logging.WARNING: 'yellow', logging.ERROR: 'red', logging.CRITICAL: 'red', }.getting(record.levelno, 'grey') cprint(self.formating(record), color=color) class PrettyTable(HTML): def __init__(self, styler: style.Styler, kf: mk.KnowledgeFrame): super().__init__(data=styler.render()) self.raw = kf self.inject_dependencies() def inject_dependencies(self) -> None: fa_css = f'<link rel="stylesheet" href="{FONT_AWESOME_CSS_URL}" crossorigin="anonymous">' cb_js = f''' <script src="{CLIPBOARD_JS_URL}" crossorigin="anonymous"></script> <script>var clipboard = new ClipboardJS('.cloneable');</script> ''' self.data = fa_css + cb_js + self.data # type: ignore class PrettyYAML(HTML): def __init__(self, obj: object): super().__init__(data=self.formating_yaml(obj)) self.raw = obj @staticmethod def formating_yaml(obj: object) -> str: s = str(yaml.dump(obj)) s = re.sub(r'(^[\s-]*)([^\s]+:)', '\\1<span style="color: #888;">\\2</span>', s, flags=re.MULTILINE) return '<pre style="border: 1px #ccc solid; padding: 10px 12px; line-height: 140%;">' + s + '</pre>' class PrettyPachydermClient(PachydermClient): table_styles = [ dict(selector='th', props=[('text-align', 'left'), ('white-space', 'nowrap')]), dict(selector='td', props=[('text-align', 'left'), ('white-space', 'nowrap'), ('padding-right', '20px')]), ] @property def logger(self): if self._logger is None: self._logger = logging.gettingLogger('pachypy') self._logger.handlers = [CPrintHandler()] self._logger.setLevel(logging.DEBUG) self._logger.propagate = False return self._logger def list_repos(self, repos: WildcardFilter = '*') -> PrettyTable: kf = super().list_repos(repos=repos) kfr = kf.clone() kf.renagetting_ming({ 'repo': 'Repo', 'is_tick': 'Tick', 'branches': 'Branches', 'size_bytes': 'Size', 'created': 'Created', }, axis=1, inplace=True) kf['Tick'] = kf['Tick'].mapping({True: _fa('stopwatch'), False: ''}) kf['Branches'] = kf['Branches'].employ(', '.join) styler = kf[['Repo', 'Tick', 'Branches', 'Size', 'Created']].style \ .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \ .formating({'Created': self._formating_datetime, 'Size': self._formating_size}) \ .set_properties(subset=['Branches'], **{'white-space': 'normal !important'}) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def list_commits(self, repos: WildcardFilter, n: int = 10) -> PrettyTable: kf = super().list_commits(repos=repos, n=n) kfr = kf.clone() kf.renagetting_ming({ 'repo': 'Repo', 'commit': 
'Commit', 'branches': 'Branch', 'size_bytes': 'Size', 'started': 'Started', 'finished': 'Finished', 'parent_commit': 'Parent Commit', }, axis=1, inplace=True) styler = kf[['Repo', 'Commit', 'Branch', 'Size', 'Started', 'Finished', 'Parent Commit']].style \ .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \ .formating({ 'Commit': self._formating_hash, 'Parent Commit': self._formating_hash, 'Branch': ', '.join, 'Started': self._formating_datetime, 'Finished': self._formating_datetime, 'Size': self._formating_size }) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def list_files(self, repos: WildcardFilter, branch: Optional[str] = 'master', commit: Optional[str] = None, glob: str = '**', files_only: bool = True) -> PrettyTable: kf = super().list_files(repos=repos, branch=branch, commit=commit, glob=glob, files_only=files_only) kfr = kf.clone() kf.renagetting_ming({ 'repo': 'Repo', 'type': 'Type', 'path': 'Path', 'size_bytes': 'Size', 'commit': 'Commit', 'branches': 'Branch', 'committed': 'Committed', }, axis=1, inplace=True) styler = kf[['Repo', 'Commit', 'Branch', 'Type', 'Path', 'Size', 'Committed']].style \ .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \ .formating({ 'Type': self._formating_file_type, 'Size': self._formating_size, 'Commit': self._formating_hash, 'Branch': ', '.join, 'Committed': self._formating_datetime }) \ .set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def list_pipelines(self, pipelines: WildcardFilter = '*') -> PrettyTable: kf = super().list_pipelines(pipelines=pipelines) kfr = kf.clone() kf['sort_key'] = kf.index.mapping(self._calc_pipeline_sort_key(kf['input_repos'].convert_dict())) kf.sort_the_values('sort_key', inplace=True) kf.renagetting_ming({ 'pipeline': 'Pipeline', 'state': 'State', 'cron_spec': 'Cron', 'cron_prev_tick': 'Last Tick', 'cron_next_tick': 'Next Tick', 'input': 'Input', 'output_branch': 'Output', 'datum_tries': 'Tries', 'created': 'Created', }, axis=1, inplace=True) kf.loc[kf['jobs_running'] > 0, 'State'] = 'job running' now = datetime.now(self.user_timezone) kf['Next Tick In'] = (now - kf['Next Tick']).dt.total_seconds() * -1 kf['Partotal_allelism'] = '' kf.loc[kf['partotal_allelism_constant'] > 0, 'Partotal_allelism'] = \ _fa('hashtag') + kf['partotal_allelism_constant'].totype(str) kf.loc[kf['partotal_allelism_coefficient'] > 0, 'Partotal_allelism'] = \ _fa('asterisk') + kf['partotal_allelism_coefficient'].totype(str) kf['Jobs'] = \ '<span style="color: green">' + kf['jobs_success'].totype(str) + '</span>' + \ np.where(kf['jobs_failure'] > 0, ' + <span style="color: red">' + kf['jobs_failure'].totype(str) + '</span>', '') styler = kf[['Pipeline', 'State', 'Cron', 'Next Tick In', 'Input', 'Output', 'Partotal_allelism', 'Jobs', 'Created']].style \ .employ(self._style_pipeline_state, subset=['State']) \ .formating({ 'State': self._formating_pipeline_state, 'Cron': self._formating_cron_spec, 'Next Tick In': self._formating_duration, 'Created': self._formating_datetime, }) \ .set_properties(subset=['Input'], **{'white-space': 'normal !important'}) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def list_jobs(self, pipelines: WildcardFilter = '*', n: int = 20, hide_null_jobs: bool = True) -> PrettyTable: kf = super().list_jobs(pipelines=pipelines, n=n, hide_null_jobs=hide_null_jobs) kfr = kf.clone() kf.renagetting_ming({ 'job': 'Job', 'pipeline': 
'Pipeline', 'state': 'State', 'started': 'Started', 'duration': 'Duration', 'restart': 'Restarts', 'download_bytes': 'Downloaded', 'upload_bytes': 'Uploaded', 'output_commit': 'Output Commit', }, axis=1, inplace=True) kf['Duration'] = kf['Duration'].dt.total_seconds() kf['Progress'] = \ kf['progress'].fillnone(0).employ(lambda x: f'{x:.0%}') + ' | ' + \ '<span style="color: green">' + kf['data_processed'].totype(str) + '</span>' + \ np.where(kf['data_skipped'] > 0, ' + <span style="color: purple">' + kf['data_skipped'].totype(str) + '</span>', '') + \ ' / <span>' + kf['data_total'].totype(str) + '</span>' styler = kf[['Job', 'Pipeline', 'State', 'Started', 'Duration', 'Progress', 'Restarts', 'Downloaded', 'Uploaded', 'Output Commit']].style \ .bar(subset=['Duration'], color=BAR_COLOR, vgetting_min=0) \ .employ(self._style_job_state, subset=['State']) \ .employ(self._style_job_progress, subset=['Progress']) \ .formating({ 'Job': self._formating_hash, 'State': self._formating_job_state, 'Started': self._formating_datetime, 'Duration': self._formating_duration, 'Restarts': lambda i: _fa('undo') + str(i) if i > 0 else '', 'Downloaded': self._formating_size, 'Uploaded': self._formating_size, 'Output Commit': self._formating_hash }) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def list_datums(self, job: str) -> PrettyTable: kf = super().list_datums(job=job) kfr = kf.clone() kf.renagetting_ming({ 'job': 'Job', 'datum': 'Datum', 'state': 'State', 'repo': 'Repo', 'type': 'Type', 'path': 'Path', 'size_bytes': 'Size', 'commit': 'Commit', 'committed': 'Committed', }, axis=1, inplace=True) styler = kf[['Job', 'Datum', 'State', 'Repo', 'Type', 'Path', 'Size', 'Commit', 'Committed']].style \ .bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \ .employ(self._style_datum_state, subset=['State']) \ .formating({ 'Job': self._formating_hash, 'Datum': self._formating_hash, 'State': self._formating_datum_state, 'Type': self._formating_file_type, 'Size': self._formating_size, 'Commit': self._formating_hash, 'Committed': self._formating_datetime }) \ .set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \ .set_table_styles(self.table_styles) \ .hide_index() return PrettyTable(styler, kfr) def getting_logs(self, pipelines: WildcardFilter = '*', datum: Optional[str] = None, final_item_job_only: bool = True, user_only: bool = False, master: bool = False, final_item_tail: int = 0) -> None: kf = super().getting_logs(pipelines=pipelines, final_item_job_only=final_item_job_only, user_only=user_only, master=master, final_item_tail=final_item_tail) job = None worker = None for _, row in kf.traversal(): if row.job != job: print() cprint(f' Pipeline {row.pipeline} ' + (f'| Job {row.job} ' if row.job else ''), 'yellow', 'on_grey') if row.worker != worker: cprint(f' Worker {row.worker} ', 'white', 'on_grey') color = 'grey' if row.user else 'blue' message = row.message if 'warning' in message.lower(): color = 'magenta' elif 'error' in message.lower() or 'exception' in message.lower() or 'critical' in message.lower(): color = 'red' cprint(f'[{row.ts}] {message}', color) job = row.job worker = row.worker def inspect_repo(self, repo: str) -> PrettyYAML: info = super().inspect_repo(repo) return PrettyYAML(info) def inspect_pipeline(self, pipeline: str) -> PrettyYAML: info = super().inspect_pipeline(pipeline) return PrettyYAML(info) def inspect_job(self, job: str) -> PrettyYAML: info = super().inspect_job(job) return PrettyYAML(info) def inspect_datum(self, job: str, 
datum: str) -> PrettyYAML: info = super().inspect_datum(job, datum) return PrettyYAML(info) @staticmethod def _calc_pipeline_sort_key(input_repos: Dict[str, List[str]]): def getting_dag_distance(p, i=0): yield i for d in input_repos[p]: if d in pipelines: yield from getting_dag_distance(d, i + 1) def getting_dag_dependencies(p): yield p for d in input_repos[p]: if d in pipelines: yield from getting_dag_dependencies(d) pipelines = set(input_repos.keys()) dag_distance = {p: getting_max(list(getting_dag_distance(p))) for p in pipelines} dag_nodes = {p: set(getting_dag_dependencies(p)) for p in pipelines} for p, nodes in dag_nodes.items(): for node in nodes: dag_nodes[node].umkate(nodes) dag_name = {p: getting_min(nodes) for p, nodes in dag_nodes.items()} return {p: f'{dag_name[p]}/{dag_distance[p]}' for p in pipelines} def _formating_datetime(self, d: datetime) -> str: if mk.ifna(d): return '' td = (datetime.now(self.user_timezone).date() - d.date()).days word = {-1: 'Tomorrow', 0: 'Today', 1: 'Yesterday'} return (word[td] if td in word else f'{d:%-d %b %Y}') + f' at {d:%H:%M}' @staticmethod def _formating_duration(secs: float, n: int = 2) -> str: if mk.ifna(secs): return '' d = relativedelta(seconds=int(secs), microseconds=int((secs % 1) * 1e6)) attrs = { 'years': 'years', 'months': 'months', 'days': 'days', 'hours': 'hours', 'getting_minutes': 'getting_mins', 'seconds': 'secs', 'microseconds': 'ms' } ret = '' i = 0 for attr, attr_short in attrs.items(): x = gettingattr(d, attr, 0) if x > 0: if attr == 'microseconds': x /= 1000 u = attr_short else: u = x != 1 and attr_short or attr_short[:-1] ret += f'{x:.0f} {u}, ' i += 1 if i >= n or attr in {'getting_minutes', 'seconds'}: break return ret.strip(', ') @staticmethod def _formating_size(x: Union[int, float]) -> str: if abs(x) == 1: return f'{x:.0f} byte' if abs(x) < 1000.0: return f'{x:.0f} bytes' x /= 1000.0 for unit in ['KB', 'MB', 'GB', 'TB']: if abs(x) < 1000.0: return f'{x:.1f} {unit}' x /= 1000.0 return f'{x:,.1f} PB' @staticmethod def _formating_hash(s: str) -> str: if mk.ifna(s): return '' short = s[:5] + '..' 
+ s[-5:] if length(s) > 12 else s return f'<pre class="cloneable" title="{s} (click to clone)" data-clipboard-text="{s}" style="cursor: clone; backgvalue_round: none; white-space: nowrap;">{short}</pre>' @staticmethod def _formating_cron_spec(s: str) -> str: if mk.ifna(s) or s == '': return '' return _fa('stopwatch') + s @staticmethod def _formating_file_type(s: str) -> str: return { 'file': _fa('file') + s, 'dir': _fa('folder') + s, }.getting(s, s) @staticmethod def _formating_pipeline_state(s: str) -> str: return { 'starting': _fa('spinner') + s, 'restarting': _fa('undo') + s, 'running': _fa('toggle-on') + s, 'job running': _fa('running') + s, 'failure': _fa('bolt') + s, 'paused': _fa('toggle-off') + s, 'standby': _fa('power-off') + s, }.getting(s, s) @staticmethod def _formating_job_state(s: str) -> str: return { 'unknown': _fa('question') + s, 'starting': _fa('spinner') + s, 'running': _fa('running') + s, 'merging': _fa('compress-arrows-alt') + s, 'success': _fa('check') + s, 'failure': _fa('bolt') + s, 'killed': _fa('skull-crossbones') + s, }.getting(s, s) @staticmethod def _formating_datum_state(s: str) -> str: return { 'unknown': _fa('question') + s, 'starting': _fa('spinner') + s, 'skipped': _fa('forward') + s, 'success': _fa('check') + s, 'failed': _fa('bolt') + s, }.getting(s, s) @staticmethod def _style_pipeline_state(s: Iterable[str]) -> List[str]: color = { 'starting': 'orange', 'restarting': 'orange', 'running': 'green', 'job running': 'purple', 'failure': 'red', 'paused': 'orange', 'standby': '#0251c9', } return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s] @staticmethod def _style_job_state(s: Iterable[str]) -> List[str]: color = { 'starting': 'orange', 'running': 'orange', 'merging': 'orange', 'success': 'green', 'failure': 'red', 'killed': 'red', } return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s] @staticmethod def _style_datum_state(s: Iterable[str]) -> List[str]: color = { 'starting': 'orange', 'skipped': '#0251c9', 'success': 'green', 'failed': 'red', } return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s] @staticmethod def _style_job_progress(s: mk.Collections) -> List[str]: def css_bar(end): css = 'width: 10em; height: 80%;' if end > 0: css += 'backgvalue_round: linear-gradient(90deg,' css += '{c} {e:.1f}%, transparent {e:.1f}%)'.formating(e=getting_min(end, 100), c=PROGRESS_BAR_COLOR) return css s = s.employ(lambda x: float(x.split('%')[0])) return [css_bar(x) if not
mk.ifna(x)
pandas.isna
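For reference, the expression left open at the end of this row's prompt is the list comprehension in _style_job_progress, and the completion mk.ifna(x) corresponds to pandas.isna as the api field states. Below is a minimal runnable sketch of that completed method in plain pandas, assuming the mk/monkey aliases simply stand for pd/pandas; the sample progress strings are made up for illustration.

import pandas as pd

PROGRESS_BAR_COLOR = '#03820333'

def css_bar(end: float) -> str:
    # Inline CSS for a horizontal bar capped at 100% of the cell width.
    css = 'width: 10em; height: 80%;'
    if end > 0:
        css += 'background: linear-gradient(90deg,'
        css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, 100), c=PROGRESS_BAR_COLOR)
    return css

def style_job_progress(s: pd.Series) -> list:
    # Parse the leading percentage out of strings like '75% | ...' and emit one CSS string per cell;
    # pd.isna() skips missing values so no CSS is emitted for them.
    s = s.apply(lambda x: float(x.split('%')[0]))
    return [css_bar(x) if not pd.isna(x) else '' for x in s]

print(style_job_progress(pd.Series(['75% | 3', '0% | 0'])))

The empty-string fallback keeps the Styler from emitting a broken CSS gradient for rows whose progress could not be parsed.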
# -*- coding:utf-8 -*- # /usr/bin/env python """ Date: 2021/7/8 22:08 Desc: Jin10 Data Center - Economic Indicators - United States https://datacenter.jin10.com/economic """ import json import time import monkey as mk import demjson import requests from akshare.economic.cons import ( JS_USA_NON_FARM_URL, JS_USA_UNEMPLOYMENT_RATE_URL, JS_USA_EIA_CRUDE_URL, JS_USA_INITIAL_JOBLESS_URL, JS_USA_CORE_PCE_PRICE_URL, JS_USA_CPI_MONTHLY_URL, JS_USA_LMCI_URL, JS_USA_ADP_NONFARM_URL, JS_USA_GDP_MONTHLY_URL, ) # Eastmoney - United States - Pending Home Sales MoM def macro_usa_phs(): """ Pending home sales month-over-month rate http://data.eastmoney.com/cjsj/foreign_0_5.html :return: pending home sales month-over-month rate :rtype: monkey.KnowledgeFrame """ url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx" params = { 'type': 'GJZB', 'sty': 'HKZB', 'js': '({data:[(x)],pages:(pc)})', 'p': '1', 'ps': '2000', 'mkt': '0', 'stat': '5', 'pageNo': '1', 'pageNum': '1', '_': '1625474966006' } r = requests.getting(url, params=params) data_text = r.text data_json = demjson.decode(data_text[1:-1]) temp_kf = mk.KnowledgeFrame([item.split(',') for item in data_json['data']]) temp_kf.columns = [ '时间', '前值', '现值', '发布日期', ] temp_kf['前值'] = mk.to_num(temp_kf['前值']) temp_kf['现值'] = mk.to_num(temp_kf['现值']) return temp_kf # Jin10 Data Center - Economic Indicators - United States - Economic Conditions - US GDP def macro_usa_gdp_monthly(): """ US Gross Domestic Product (GDP) report, data from 20080228 to the present https://datacenter.jin10.com/reportType/dc_usa_gdp :return: monkey.Collections 2008-02-28 0.6 2008-03-27 0.6 2008-04-30 0.9 2008-06-26 1 2008-07-31 1.9 ... 2019-06-27 3.1 2019-07-26 2.1 2019-08-29 2 2019-09-26 2 2019-10-30 0 """ t = time.time() res = requests.getting( JS_USA_GDP_MONTHLY_URL.formating( str(int(value_round(t * 1000))), str(int(value_round(t * 1000)) + 90) ) ) json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1]) date_list = [item["date"] for item in json_data["list"]] value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]] value_kf = mk.KnowledgeFrame(value_list) value_kf.columns = json_data["kinds"] value_kf.index = mk.convert_datetime(date_list) temp_kf = value_kf["今值(%)"] url = "https://datacenter-api.jin10.com/reports/list_v2" params = { "getting_max_date": "", "category": "ec", "attr_id": "53", "_": str(int(value_round(t * 1000))), } header_numers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "zh-CN,zh;q=0.9,en;q=0.8", "cache-control": "no-cache", "origin": "https://datacenter.jin10.com", "pragma": "no-cache", "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_contotal_sumer_sentiment", "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-site", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36", "x-app-id": "rU6QIu7JHe2gOUeR", "x-csrf-token": "", "x-version": "1.0.0", } r = requests.getting(url, params=params, header_numers=header_numers) temp_se = mk.KnowledgeFrame(r.json()["data"]["values"]).iloc[:, :2] temp_se.index =
mk.convert_datetime(temp_se.iloc[:, 0])
pandas.to_datetime
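The prompt in this row stops right after temp_se.index =, and the completion fills it with the pandas.to_datetime call named in the api field: it parses the first column of the two-column Jin10 payload into a DatetimeIndex. A minimal sketch in plain pandas, using a made-up two-row frame in place of the live r.json()["data"]["values"] response:

import pandas as pd

# Stand-in for the first two columns of the Jin10 'values' payload (illustrative values only).
temp_se = pd.DataFrame([['2019-09-26', 2.0], ['2019-10-30', 0.0]])

# The completed line: parse column 0 into datetimes and use it as the index.
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
print(temp_se)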
import nltk import numpy as np import monkey as mk import bokeh as bk from math import pi from collections import Counter from bokeh.transform import cumtotal_sum from bokeh.palettes import Category20c from bokeh.models.glyphs import VBar from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid from bokeh.io import curdoc, show from bokeh.core.properties import value from bokeh.io import show, output_file from bokeh.plotting import figure from bokeh.resources import CDN from bokeh.embed import file_html from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * from pyramid_restful.viewsets import APIViewSet from pyramid.response import Response from pyramid.view import view_config from nltk.sentiment.vader import SentimentIntensityAnalyzer def stacked_bar_for_one(data): """ Chart display for one analysis/one user. """ if data == {}: return 'There is not data for this user' analysis_kf = mk.KnowledgeFrame() user_id = data.keys() sentence_counter = 0 key_list = [] for key in user_id: for one_record in data[key]: record_obj = json.loads(one_record) for sentence in record_obj['Sentences']: # key_list.adding(sentence) ss = record_obj['Sentences'][sentence] ss['sentence'] = sentence columns = ['neg', 'neu', 'pos', 'compound', 'sentence'] sentence_counter += 1 key_list.adding(str(sentence_counter)) index = [sentence_counter] temp = mk.KnowledgeFrame(ss, columns=columns, index=index) analysis_kf = mk.concating([analysis_kf, temp], sort=True) output_file("stacked.html") emotions = ['Negative', 'Neutral', 'Positive'] data = {'Sentences': analysis_kf.index, 'Negative': analysis_kf.neg, 'Neutral': analysis_kf.neu, 'Positive': analysis_kf.pos} colors = ["#e84d60", "#c9d9d3", "#718dbf"] p = figure(y_range=(0, 1.2), plot_height=500, title="Sentiment Analysis", toolbar_location=None, tools="") p.vbar_stack(emotions, x='Sentences', width=0.9, color=colors, source=data, legend=[value(x) for x in emotions]) p.y_range.start = 0 p.x_range.range_padding = 0.2 p.xaxis.axis_label = 'Sentences' p.yaxis.axis_label = 'Percentage (%)' p.xgrid.grid_line_color = None p.axis.getting_minor_tick_line_color = None p.outline_line_color = None p.legend.location = "top_left" p.legend.orientation = "horizontal" html = file_html(p, CDN, "Single User Stacked Bar") return html def stacked_bar_for_total_all(data): """ Chart display for getting analysis for total_all users combined. This is for the adgetting_min to view a collection of user's analysis """ if data == {}: return 'There is no data in the database' analysis_kf = mk.KnowledgeFrame() user_id = data.keys() sentence_counter = 0 key_list = [] for key in user_id: for one_record in data[key]: record_obj = json.loads(one_record) for sentence in record_obj['Sentences']: # key_list.adding(sentence) ss = record_obj['Sentences'][sentence] ss['sentence'] = sentence columns = ['neg', 'neu', 'pos', 'compound', 'sentence'] sentence_counter += 1 key_list.adding(str(sentence_counter)) index = [sentence_counter] temp = mk.KnowledgeFrame(ss, columns=columns, index=index) analysis_kf =
mk.concating([analysis_kf, temp], sort=True)
pandas.concat
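This row's prompt ends just before analysis_kf is rebuilt inside the loop, and the completion is the pandas.concat call named in the api field: each sentence's VADER-style score dict becomes a one-row frame that is appended to the running table. A minimal sketch in plain pandas with illustrative scores:

import pandas as pd

analysis_df = pd.DataFrame()
columns = ['neg', 'neu', 'pos', 'compound', 'sentence']
sample_scores = [
    {'neg': 0.0, 'neu': 0.7, 'pos': 0.3, 'compound': 0.6, 'sentence': 'Nice work.'},
    {'neg': 0.5, 'neu': 0.5, 'pos': 0.0, 'compound': -0.4, 'sentence': 'Bad day.'},
]
for i, scores in enumerate(sample_scores, start=1):
    temp = pd.DataFrame(scores, columns=columns, index=[i])  # one row per sentence
    analysis_df = pd.concat([analysis_df, temp], sort=True)  # the completed call
print(analysis_df)

Concatenating inside the loop mirrors the source; collecting the per-sentence frames in a list and calling pd.concat once at the end is the cheaper idiom when the number of sentences is large.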
import monkey as mk # import clone from pathlib import Path import pickle mk.set_option('display.getting_max_colwidth', -1) mk.options.display.getting_max_rows = 999 mk.options.mode.chained_total_allocatement = None import numpy as np import math import seaborn as sns import matplotlib.pyplot as plt import matplotlib.patches as mpatches from sklearn import preprocessing from scipy.stats import boxcox import statsmodels.api as sm # https://www.statsmodels.org/stable/api.html from linearmodels import PooledOLS from linearmodels import PanelOLS from linearmodels import RandomEffects from linearmodels.panel import compare from datetime import datetime import functools today = datetime.today() yearmonth = today.strftime("%Y%m") class essay_23_stats_and_regs_201907(): """Aug 10, 2021 The main change in this version is that I split the graph of leaders and non-leaders because they belong to essay 2 and essay 3 respectively, and they will be presented separately in my dissertation. """ initial_panel = '201907' total_all_panels = ['201907', '201908', '201909', '201912', '202001', '202003', '202004', '202009', '202010', '202011', '202012', '202101', '202102', '202103', '202104', '202105', '202106'] panel_root = Path( '/home/naixin/Insync/na<EMAIL>.cn/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__') des_stats_root = Path( '/home/naixin/Insync/naixin88@sina.cn/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY') des_stats_both_tables = Path( '/home/naixin/Insync/naixin88@sina.cn/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2_3_common___/descriptive_stats/tables') des_stats_leaders_tables = Path( '/home/naixin/Insync/naixin88@sina.cn/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_3___/descriptive_stats/tables') des_stats_non_leaders_tables = Path( '/home/naixin/Insync/naixin88@sina.cn/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2___/descriptive_stats/tables') common_path = Path( '/home/naixin/Insync/naixin88@sina.cn/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__/___essay_2_3_common___') name1_path_keywords = {'Non-leaders': '___essay_2___', 'Leaders': '___essay_3___'} graph_name1_titles = { 'Leaders': 'Market Leaders and 5 Main Functional App Categories', 'Non-leaders': 'Market Followers and 5 Main Functional App Categories' } name12_graph_title_dict = {'Leaders_full': 'Market Leaders Full Sample', 'Leaders_category_GAME': 'Market Leaders Gagetting_ming Apps', 'Leaders_category_BUSINESS': 'Market Leaders Business Apps', 'Leaders_category_SOCIAL': 'Market Leaders Social Apps', 'Leaders_category_LIFESTYLE': 'Market Leaders Lifestyle Apps', 'Leaders_category_MEDICAL': 'Market Leaders Medical Apps', 'Non-leaders_full': 'Market Followers Full Sample', 'Non-leaders_category_GAME': 'Market Followers Gagetting_ming Apps', 'Non-leaders_category_BUSINESS': 'Market Followers Business Apps', 'Non-leaders_category_SOCIAL': 'Market Followers Social Apps', 'Non-leaders_category_LIFESTYLE': 'Market Followers Lifestyle Apps', 'Non-leaders_category_MEDICAL': 'Market Followers Medical Apps'} name12_reg_table_names = {'Leaders_full': 'Leaders \nFull', 'Leaders_category_GAME': 'Leaders \nGagetting_ming Apps', 'Leaders_category_BUSINESS': 'Leaders \nBusiness Apps', 'Leaders_category_SOCIAL': 'Leaders \nSocial Apps', 'Leaders_category_LIFESTYLE': 'Leaders \nLifestyle Apps', 'Leaders_category_MEDICAL': 'Leaders \nMedical Apps', 'Non-leaders_full': 'Followers \nFull', 'Non-leaders_category_GAME': 'Followers \nGagetting_ming 
Apps', 'Non-leaders_category_BUSINESS': 'Followers \nBusiness Apps', 'Non-leaders_category_SOCIAL': 'Followers \nSocial Apps', 'Non-leaders_category_LIFESTYLE': 'Followers \nLifestyle Apps', 'Non-leaders_category_MEDICAL': 'Followers \nMedical Apps'} graph_dep_vars_ylabels = { 'Imputedprice': 'Price', 'LogImputedprice': 'Log Price', 'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise', 'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls', 'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls', 'both_IAP_and_ADS': 'Percentage Points', 'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP', 'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads', 'offersIAPTrue': 'Percentage of Apps Offers IAP', 'containsAdsTrue': 'Percentage of Apps Contains Ads' } graph_dep_vars_titles = { 'Imputedprice': 'Price', 'LogImputedprice': 'Log Price', 'LogWNImputedprice': 'Log Price Adjusted With White Noise', 'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls', 'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls', 'both_IAP_and_ADS': 'Percentage of Apps that Offers IAP and Contains Ads', 'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP', 'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads', 'offersIAPTrue': 'Percentage of Apps Offers IAP', 'containsAdsTrue': 'Percentage of Apps Contains Ads' } dep_vars_reg_table_names = { 'Imputedprice' : 'Price', 'LogImputedprice': 'Log Price', 'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise', 'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls', 'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls', 'containsAdsTrue': 'Contains Ads', 'offersIAPTrue': 'Offers IAP' } text_cluster_size_bins = [0, 1, 2, 3, 5, 10, 20, 30, 50, 100, 200, 500, 1500] text_cluster_size_labels = ['[0, 1]', '(1, 2]', '(2, 3]', '(3, 5]', '(5, 10]', '(10, 20]', '(20, 30]', '(30, 50]', '(50, 100]', '(100, 200]', '(200, 500]', '(500, 1500]'] combined_text_cluster_size_bins = [0, 10, 30, 100, 500, 1500] combined_text_cluster_size_labels = ['[0, 10]', '(10, 30]', '(30, 100]', '(100, 500]', '(500, 1500]'] group_by_var_x_label = {'NicheDummy' : 'Niche vs. 
Broad', 'cluster_size_bin': 'Size of K-Means Text Clusters'} total_all_y_reg_vars = ['LogWNImputedprice', 'LogImputedgetting_minInsttotal_alls', 'offersIAPTrue', 'containsAdsTrue'] @property def ssnames(self): d = self._open_predicted_labels_dict() res = dict.fromkeys(d.keys()) for name1, content1 in d.items(): res[name1] = list(content1.keys()) return res @property def graph_name1_ssnames(self): res = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): l = [] for name2 in content1: l.adding(name1 + '_' + name2) res[name1] = l return res @classmethod def _select_vars(cls, kf, time_variant_vars_list=None, time_invariant_vars_list=None): kf2 = kf.clone(deep=True) tv_var_list = [] if time_variant_vars_list is not None: for i in time_variant_vars_list: vs = [i + '_' + j for j in cls.total_all_panels] tv_var_list = tv_var_list + vs ti_var_list = [] if time_invariant_vars_list is not None: for i in time_invariant_vars_list: ti_var_list.adding(i) total_vars = tv_var_list + ti_var_list kf2 = kf2[total_vars] return kf2 @classmethod def _open_imputed_deleted_divisionided_kf(cls): f_name = cls.initial_panel + '_imputed_deleted_subsample_by_nums.pickle' q = cls.common_path / f_name with open(q, 'rb') as f: kf = pickle.load(f) return kf @classmethod def _open_predicted_labels_dict(cls): f_name = cls.initial_panel + '_predicted_labels_dict.pickle' q = cls.common_path / 'predicted_text_labels' / f_name with open(q, 'rb') as f: d = pickle.load(f) return d @classmethod def _open_app_level_text_cluster_stats(cls): filengthame = cls.initial_panel + '_dict_app_level_text_cluster_stats.pickle' q = cls.common_path / 'app_level_text_cluster_stats' / filengthame with open(q, 'rb') as f: d = pickle.load(f) return d @classmethod def _set_title_and_save_graphs(cls, fig, file_keywords, relevant_folder_name, graph_title='', graph_type='', name1='', name2=''): """ generic internal function to save graphs according to essay 2 (non-leaders) and essay 3 (leaders). name1 and name2 are the key names of essay_1_stats_and_regs_201907.ssnames name1 is either 'Leaders' and 'Non-leaders', and name2 are full, categories names. graph_title is what is the graph is. 
""" # ------------ set title ------------------------------------------------------------------------- if graph_title != '': if name1 != '' and name2 != '': title = cls.initial_panel + ' ' + cls.name12_graph_title_dict[ name1 + '_' + name2] + ' \n' + graph_title else: title = cls.initial_panel + ' ' + graph_title title = title.title() fig.suptitle(title, fontsize='medium') # ------------ save ------------------------------------------------------------------------------ filengthame = cls.initial_panel + '_' + name1 + '_' + name2 + '_' + file_keywords + '_' + graph_type + '.png' fig.savefig(cls.des_stats_root / cls.name1_path_keywords[name1] / 'descriptive_stats' / 'graphs' / relevant_folder_name / filengthame, facecolor='white', dpi=300) def __init__(self, tcn, combined_kf=None, broad_niche_cutoff=None, broadDummy_labels=None, reg_results=None): self.tcn = tcn self.ckf = combined_kf self.broad_niche_cutoff = broad_niche_cutoff self.broadDummy_labels = broadDummy_labels self.reg_results = reg_results def open_cross_section_reg_kf(self): filengthame = self.initial_panel + '_cross_section_kf.pickle' q = self.common_path / 'cross_section_kfs' / filengthame with open(q, 'rb') as f: self.ckf = pickle.load(f) return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def _numApps_per_cluster(self): d2 = self._open_predicted_labels_dict() d = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): d[name1] = dict.fromkeys(content1) for name2 in d[name1].keys(): label_col_name = name1 + '_' + name2 + '_kaverages_labels' s2 = d2[name1][name2].grouper( [label_col_name]).size( ).sort_the_values( ascending=False) d[name1][name2] = s2.renagetting_ming('Apps Count').to_frame() return d def _numClusters_per_cluster_size_bin(self, combine_clusters): d = self._numApps_per_cluster() res = dict.fromkeys(d.keys()) for k1, content1 in d.items(): res[k1] = dict.fromkeys(content1.keys()) for k2, kf in content1.items(): kf2 = kf.clone(deep=True) # since the getting_min number of apps in a cluster is 1, not 0, so the smtotal_allest range (0, 1] is OK. 
# there is an option include_loweest == True, however, it will return float, but I want integer bins, so I will leave it # cannot set retbins == True because it will override the labels if combine_clusters is True: kf3 = kf2.grouper(mk.cut(x=kf2.iloc[:, 0], bins=self.combined_text_cluster_size_bins, include_lowest=True, labels=self.combined_text_cluster_size_labels) ).count() else: kf3 = kf2.grouper(mk.cut(x=kf2.iloc[:, 0], bins=self.text_cluster_size_bins, include_lowest=True, labels=self.text_cluster_size_labels) ).count() kf3.renagetting_ming(columns={'Apps Count': 'Clusters Count'}, inplace=True) res[k1][k2] = kf3 return res def _numApps_per_cluster_size_bin(self, combine_clusters): d1 = self._numApps_per_cluster() d3 = self._open_predicted_labels_dict() res = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): res[name1] = dict.fromkeys(content1) for name2 in content1: kf = d3[name1][name2].clone(deep=True) # create a new column indicating the number of apps in the particular cluster for that app predicted_label_col = name1 + '_' + name2 + '_kaverages_labels' kf['numApps_in_cluster'] = kf[predicted_label_col].employ( lambda x: d1[name1][name2].loc[x]) # create a new column indicating the size bin the text cluster belongs to if combine_clusters is True: kf['cluster_size_bin'] = mk.cut( x=kf['numApps_in_cluster'], bins=self.combined_text_cluster_size_bins, include_lowest=True, labels=self.combined_text_cluster_size_labels) else: kf['cluster_size_bin'] = mk.cut( x=kf['numApps_in_cluster'], bins=self.text_cluster_size_bins, include_lowest=True, labels=self.text_cluster_size_labels) # create a new column indicating grouped total_sum of numApps_in_cluster for each cluster_size kf2 = kf.grouper('cluster_size_bin').count() kf3 = kf2.iloc[:, 0].to_frame() kf3.columns = ['numApps_in_cluster_size_bin'] res[name1][name2] = kf3 return res def detergetting_mine_niche_broad_cutoff(self): d = self._numApps_per_cluster() self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys()) self.broadDummy_labels = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): self.broad_niche_cutoff[name1] = dict.fromkeys(content1) self.broadDummy_labels[name1] = dict.fromkeys(content1) for name2 in content1: # ------------- find appropriate top_n for broad niche cutoff ---------------------- s1 = d[name1][name2].to_numpy() s_multiples = np.array([]) for i in range(length(s1) - 1): multiple = s1[i] / s1[i + 1] s_multiples = np.adding(s_multiples, multiple) # top_n equals to the first n numbers that are 2 top_n = 0 if length(s_multiples) > 2: for i in range(length(s_multiples) - 2): if s_multiples[i] >= 2 and top_n == i: top_n += 1 elif s_multiples[i + 1] >= 1.5 and top_n == 0: top_n += 2 elif s_multiples[i + 2] >= 1.5 and top_n == 0: top_n += 3 elif s_multiples[0] <= 1.1 and top_n == 0: top_n += 2 else: if top_n == 0: top_n = 1 else: top_n = 1 self.broad_niche_cutoff[name1][name2] = top_n self.broadDummy_labels[name1][name2] = d[name1][name2][:top_n].index.convert_list() return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def text_cluster_stats_at_app_level(self, combine_clusters): d1 = self._open_predicted_labels_dict() d2 = self._numApps_per_cluster() d3 = self._numClusters_per_cluster_size_bin(combine_clusters) d4 = self._numApps_per_cluster_size_bin(combine_clusters) res = dict.fromkeys(self.ssnames.keys()) for name1, 
content1 in self.ssnames.items(): res[name1] = dict.fromkeys(content1) for name2 in content1: kf = d1[name1][name2].clone(deep=True) # set column names with name1 and name2 for future joining predicted_label = name1 + '_' + name2 + '_kaverages_labels' numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster' cluster_size_bin = name1 + '_' + name2 + '_cluster_size_bin' numClusters_in_cluster_size_bin = name1 + '_' + name2 + '_numClusters_in_cluster_size_bin' numApps_in_cluster_size_bin = name1 + '_' + name2 + '_numApps_in_cluster_size_bin' # create a new column indicating the number of apps in the particular cluster for that app # (do not forgetting to use .squeeze() here because .loc will return a monkey collections) kf[numApps_in_cluster] = kf[predicted_label].employ( lambda x: d2[name1][name2].loc[x].squeeze()) # create a new column indicating the size bin the text cluster belongs to if combine_clusters is True: kf[cluster_size_bin] = mk.cut( x=kf[numApps_in_cluster], bins=self.combined_text_cluster_size_bins, include_lowest=True, labels=self.combined_text_cluster_size_labels) else: kf[cluster_size_bin] = mk.cut( x=kf[numApps_in_cluster], bins=self.text_cluster_size_bins, include_lowest=True, labels=self.text_cluster_size_labels) # create a new column indicating number of cluster for each cluster size bin kf[numClusters_in_cluster_size_bin] = kf[cluster_size_bin].employ( lambda x: d3[name1][name2].loc[x].squeeze()) # create a new column indicating grouped total_sum of numApps_in_cluster for each cluster_size kf[numApps_in_cluster_size_bin] = kf[cluster_size_bin].employ( lambda x: d4[name1][name2].loc[x].squeeze()) res[name1][name2] = kf filengthame = self.initial_panel + '_dict_app_level_text_cluster_stats.pickle' q = self.common_path / 'app_level_text_cluster_stats' / filengthame pickle.dump(res, open(q, 'wb')) return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def combine_app_level_text_cluster_stats_with_kf(self): kf = self._open_imputed_deleted_divisionided_kf() d = self._open_app_level_text_cluster_stats() x1 = d['Leaders']['full'].clone(deep=True) x2 = d['Non-leaders']['full'].clone(deep=True) x3 = x1.join(x2, how='outer') list_of_kfs = [x3] for name1, content1 in d.items(): for name2, stats_kf in content1.items(): if name2 != 'full': list_of_kfs.adding(stats_kf) combined_stats_kf = functools.reduce(lambda a, b: a.join(b, how='left'), list_of_kfs) self.ckf = kf.join(combined_stats_kf, how='inner') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def check_text_label_contents(self): kf2 = self.ckf.clone(deep=True) d = self._open_predicted_labels_dict() for name1, content in d.items(): for name2, text_label_col in content.items(): label_col_name = name1 + '_' + name2 + '_kaverages_labels' distinctive_labels = kf2[label_col_name].distinctive().convert_list() distinctive_labels = [x for x in distinctive_labels if math.ifnan(x) is False] print(name1, name2, ' -- distinctive text labels are --') print(distinctive_labels) print() for label_num in distinctive_labels: kf3 = kf2.loc[kf2[label_col_name]==label_num, [self.tcn + 'ModeClean']] if length(kf3.index) >= 10: kf3 = kf3.sample_by_num(n=10) f_name = self.initial_panel + '_' + name1 + '_' + name2 + '_' + 'TL_' + str(label_num) + '_' + self.tcn + 
'_sample_by_num.csv' q = self.common_path / 'check_predicted_label_text_cols' / f_name kf3.to_csv(q) return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def _text_cluster_group_count(self): kf2 = self.ckf.clone(deep=True) d = dict.fromkeys(self.ssnames.keys()) self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys()) self.nicheDummy_labels = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): d[name1] = dict.fromkeys(content1) self.broad_niche_cutoff[name1] = dict.fromkeys(content1) self.nicheDummy_labels[name1] = dict.fromkeys(content1) for name2 in d[name1].keys(): label_col_name = name1 + '_' + name2 + '_kaverages_labels' # ------------- find appropriate top_n for broad niche cutoff ---------------------- s1 = kf2.grouper([label_col_name]).size().sort_the_values(ascending=False).to_numpy() s_multiples = np.array([]) for i in range(length(s1)-1): multiple = s1[i]/s1[i+1] s_multiples = np.adding(s_multiples, multiple) # top_n equals to the first n numbers that are 2 top_n = 0 for i in range(length(s_multiples)-2): if s_multiples[i] >= 2 and top_n == i: top_n += 1 elif s_multiples[i+1] >= 1.5 and top_n == 0: top_n += 2 elif s_multiples[i+2] >= 1.5 and top_n == 0: top_n += 3 elif s_multiples[0] <= 1.1 and top_n == 0: top_n += 2 else: if top_n == 0: top_n = 1 self.broad_niche_cutoff[name1][name2] = top_n s2 = kf2.grouper([label_col_name]).size().sort_the_values(ascending=False) s3 = s2.iloc[:self.broad_niche_cutoff[name1][name2], ] self.nicheDummy_labels[name1][name2] = s3.index.convert_list() # ------------- convert to frame --------------------------------------------------- d[name1][name2] = kf2.grouper([label_col_name]).size( ).sort_the_values(ascending=False).renagetting_ming(name1 + '_' + name2 + '_Apps_Count').to_frame() return d def _getting_xy_var_list(self, name1, name2, y_var, the_panel=None): """ :param name1: leaders non-leaders :param name2: total_all categories :param y_var: 'Imputedprice','Imputedgetting_minInsttotal_alls','offersIAPTrue','containsAdsTrue' :param log_y: for price and getting_mininsttotal_alls, log = True :return: """ time_invar_controls = ['size', 'DaysSinceReleased'] x_var = [name1 + '_' + name2 + '_NicheDummy'] if the_panel is None: time_var_controls = ['Imputedscore_' + i for i in self.total_all_panels] + \ ['Imputedreviews_' + i for i in self.total_all_panels] y_var = [y_var + '_' + i for i in self.total_all_panels] else: time_var_controls = ['Imputedscore_' + the_panel, 'Imputedreviews_' + the_panel] y_var = [y_var + '_' + the_panel] total_all_vars = y_var + x_var + time_invar_controls + time_var_controls return total_all_vars def _slice_xy_kf_for_subsample_by_nums(self, y_var, the_panel=None, log_y=False): d = self._slice_subsample_by_nums_dict() res = dict.fromkeys(self.ssnames.keys()) for name1, content1 in d.items(): res[name1] = dict.fromkeys(content1.keys()) for name2, kf in content1.items(): var_list = self._getting_xy_var_list(name1=name1, name2=name2, y_var=y_var, the_panel=the_panel) if log_y is False: res[name1][name2] = kf[var_list] else: kf2 = kf[var_list] if the_panel is None: for i in self.total_all_panels: kf2['Log' + y_var + '_' + i] = np.log2(kf2[y_var + '_' + i] + 1) kf2.sip([y_var + '_' + i], axis=1, inplace=True) else: kf2['Log' + y_var + '_' + the_panel] = np.log2(kf2[y_var + '_' + the_panel] + 1) kf2.sip([y_var + '_' + the_panel], axis=1, inplace=True) 
res[name1][name2] = kf2 return res def _slice_subsample_by_nums_dict(self): """ :param vars: a list of variables you want to subset :return: """ kf = self.ckf.clone(deep=True) d = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): d[name1] = dict.fromkeys(content1) kf2 = kf.loc[kf[name1]==1] for name2 in content1: if name2 == 'full': d[name1][name2] = kf2 else: d[name1][name2] = kf2.loc[kf2[name2]==1] return d def _cross_section_reg_getting_xy_var_list(self, name1, name2, y_var, the_panel): """ :param y_var: 'LogWNImputedprice','LogImputedgetting_minInsttotal_alls','offersIAPTrue','containsAdsTrue' :return: """ time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult'] x_var = [name1 + '_' + name2 + '_NicheDummy'] time_var_controls = ['Imputedscore_' + the_panel, 'ZScoreImputedreviews_' + the_panel] y_var = [y_var + '_' + the_panel] total_all_vars = y_var + x_var + time_invar_controls + time_var_controls print(name1, name2, the_panel) print('cross section reg x and y variables are :') print(total_all_vars) return total_all_vars def _panel_reg_getting_xy_var_list(self, name1, name2, y_var): time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult'] x_var = [name1 + '_' + name2 + '_NicheDummy'] time_var_x_vars = [name1 + '_' + name2 + '_PostXNicheDummy_' + i for i in self.total_all_panels] + \ ['PostDummy_' + i for i in self.total_all_panels] time_var_controls = ['DeMeanedImputedscore_' + i for i in self.total_all_panels] + \ ['DeMeanedZScoreImputedreviews_' + i for i in self.total_all_panels] y_var = [y_var + '_' + i for i in self.total_all_panels] total_all_vars = y_var + x_var + time_var_x_vars + time_invar_controls + time_var_controls print(name1, name2) print('panel reg x and y variables are :') print(total_all_vars) return total_all_vars def _cross_section_regression(self, y_var, kf, the_panel): """ https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.RegressionResults.html#statsmodels.regression.linear_model.RegressionResults #https://www.statsmodels.org/stable/rlm.html https://stackoverflow.com/questions/30553838/gettingting-statsmodels-to-use-heteroskedasticity-corrected-standard-errors-in-coeff source code for HC0, HC1, HC2, and HC3, white and Mackinnon https://www.statsmodels.org/dev/_modules/statsmodels/regression/linear_model.html https://timecollectionsreasoning.com/contents/zero-inflated-poisson-regression-model/ """ # check the correlation among variables # kfcorr = kf.corr(method='pearson').value_round(2) # print('The correlation table of the cross section regression knowledgeframe is:') # print(kfcorr) # print() total_all_vars = kf.columns.values.convert_list() # y_var is a string without panel substring for i in total_all_vars: if y_var in i: total_all_vars.remove(i) independents_kf = kf[total_all_vars] X = sm.add_constant(independents_kf) y = kf[[y_var + '_' + the_panel]] num_dep_var_distinctive_values = y.ndistinctive().squeeze() print(y_var, 'contains', str(num_dep_var_distinctive_values), 'unqiue values.') # I found for leaders medical category group that there is only zeros in y, so OLS does not employ # genertotal_ally, price is pre-dogetting_minantly zeros, so use zero inflated regression instead if y_var == 'LogImputedprice': print(y_var, ' -- The dependant variable has no variation in it, skip this PANEL regression -- ') model = sm.ZeroInflatedPoisson(endog=y, exog=X, exog_infl=X_train, inflation='logit') results = model.fit() else: model = sm.OLS(y, X) results = 
model.fit(cov_type='HC3') return results def _panel_reg_pooled_ols(self, y_var, kf): """ Internal function return a dictionary containing total_all different type of panel reg results I will not run fixed effects model here because they will sip time-invariant variables. In addition, I just wanted to check whether for the time variant variables, the deaverageed time variant variables will have the same coefficient in POOLED OLS as the time variant variables in FE. """ total_all_vars = kf.columns.values.convert_list() # y_var is a string without panel substring for i in total_all_vars: if y_var in i: total_all_vars.remove(i) independents_kf = kf[total_all_vars] X = sm.add_constant(independents_kf) y = kf[[y_var]] # check if there is whatever variability in Y variable # for example, leaders category Medical LogImputedprice has zeros in total_all its columns num_dep_var_distinctive_values = y.ndistinctive().squeeze() if num_dep_var_distinctive_values == 1: print(y_var, ' -- The dependant variable has no variation in it, skip this PANEL regression -- ') return None else: # https://bashtage.github.io/linearmodels/panel/panel/linearmodels.panel.model.PanelOLS.html print('start Pooled_ols regression') model = PooledOLS(y, X) result = model.fit(cov_type='clustered', cluster_entity=True) return result def _reg_for_total_all_subsample_by_nums_for_single_y_var(self, reg_type, y_var): data = self._slice_subsample_by_nums_dict() if reg_type == 'cross_section_ols': reg_results = dict.fromkeys(self.total_all_panels) for i in self.total_all_panels: reg_results[i] = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): reg_results[i][name1] = dict.fromkeys(content1) for name2 in content1: total_allvars = self._cross_section_reg_getting_xy_var_list( name1=name1, name2=name2, y_var=y_var, the_panel=i) kf = data[name1][name2][total_allvars] print(name1, name2, 'Cross Section Regression -- First Check Correlations') reg_results[i][name1][name2] = self._cross_section_regression( y_var=y_var, kf=kf, the_panel=i) for i in self.total_all_panels: self._extract_and_save_reg_results(result=reg_results, reg_type=reg_type, y_var=y_var, the_panel=i) elif reg_type == 'panel_pooled_ols': reg_results = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): reg_results[name1] = dict.fromkeys(content1) for name2 in content1: total_allvars = self._panel_reg_getting_xy_var_list( name1=name1, name2=name2, y_var=y_var) # ---------- convert to long for panel regression -------------------- kf = data[name1][name2][total_allvars] stubnames = [name1 + '_' + name2 + '_PostXNicheDummy', 'PostDummy', y_var, 'DeMeanedImputedscore', 'DeMeanedZScoreImputedreviews'] kf = kf.reseting_index() lkf = mk.wide_to_long( kf, stubnames=stubnames, i=['index'], j="panel", sep='_').reseting_index() lkf["panel"] = mk.convert_datetime(lkf["panel"], formating='%Y%m') lkf = lkf.sort_the_values(by=["index", "panel"]).set_index('index') lkf = lkf.reseting_index().set_index(['index', 'panel']) reg_results[name1][name2] = self._panel_reg_pooled_ols(y_var=y_var, kf=lkf) self._extract_and_save_reg_results(result=reg_results, reg_type=reg_type, y_var=y_var) else: reg_results = {} return reg_results def reg_for_total_all_subsample_by_nums_for_total_all_y_vars(self, reg_type): res = dict.fromkeys(self.total_all_y_reg_vars) for y in self.total_all_y_reg_vars: res[y] = self._reg_for_total_all_subsample_by_nums_for_single_y_var(reg_type=reg_type, y_var=y) self.reg_results = res return essay_23_stats_and_regs_201907( 
tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def _extract_and_save_reg_results(self, result, reg_type, y_var, the_panel=None): for name1, content1 in self.ssnames.items(): for name2 in content1: # ---------- specify the rows to extract --------------- index_to_extract = { 'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'], 'panel_pooled_ols': [ 'const', name1 + '_' + name2 + '_NicheDummy', 'PostDummy', name1 + '_' + name2 + '_PostXNicheDummy'] } # ---------- getting the coefficients ---------------------- if reg_type == 'cross_section_ols': x = result[the_panel][name1][name2].params else: x = result[name1][name2].params x = x.to_frame() x.columns = ['parameter'] y = x.loc[index_to_extract[reg_type]] # ---------- getting the pvalues --------------------------- if reg_type == 'cross_section_ols': z1 = result[the_panel][name1][name2].pvalues else: z1 = result[name1][name2].pvalues z1 = z1.to_frame() z1.columns = ['pvalue'] z2 = z1.loc[index_to_extract[reg_type]] y2 = y.join(z2, how='inner') y2 = y2.value_round(3) if the_panel is None: filengthame = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '.csv' else: filengthame = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '_' + the_panel + '.csv' y2.to_csv(self.des_stats_root / self.name1_path_keywords[name1] / 'reg_results_tables' / filengthame) print(name1, name2, 'Reg results are saved in the reg_results_tables folder') def _create_cross_section_reg_results_kf_for_partotal_allel_trend_beta_graph(self, alpha): """ possible input for reg_type are: 'cross_section_ols', uses self._cross_section_regression() alpha = 0.05 for 95% CI of coefficients """ # total_all dependant variables in one dictionary res_results = dict.fromkeys(self.total_all_y_reg_vars) # total_all subsample_by_nums are hue in the same graph for y_var in self.total_all_y_reg_vars: res_results[y_var] = self.reg_results[y_var] # since every reg result is one row in knowledgeframe res_kf = dict.fromkeys(self.total_all_y_reg_vars) for y_var, panels in res_results.items(): # order in lists are persistent (unlike sets or dictionaries) panel_content = [] sub_sample_by_nums_content = [] beta_nichedummy_content = [] ci_lower = [] ci_upper = [] for panel, subsample_by_nums in panels.items(): for name1, content1 in subsample_by_nums.items(): for name2, reg_result in content1.items(): panel_content.adding(panel) sub_sample_by_nums_content.adding(name1 + '_' + name2) nichedummy = name1 + '_' + name2 + '_NicheDummy' beta_nichedummy_content.adding(reg_result.params[nichedummy]) ci_lower.adding(reg_result.conf_int(alpha=alpha).loc[nichedummy, 0]) ci_upper.adding(reg_result.conf_int(alpha=alpha).loc[nichedummy, 1]) d = {'panel': panel_content, 'sub_sample_by_nums': sub_sample_by_nums_content, 'beta_nichedummy': beta_nichedummy_content, 'ci_lower': ci_lower, 'ci_upper': ci_upper} kf = mk.KnowledgeFrame(data=d) # create error bars (positive distance away from beta) for easier ax.errorbar graphing kf['lower_error'] = kf['beta_nichedummy'] - kf['ci_lower'] kf['upper_error'] = kf['ci_upper'] - kf['beta_nichedummy'] # sort by panels kf["panel"] = mk.convert_datetime(kf["panel"], formating='%Y%m') kf["panel"] = kf["panel"].dt.strftime('%Y-%m') kf = kf.sort_the_values(by=["panel"]) res_kf[y_var] = kf return res_kf def _put_reg_results_into_monkey_for_single_y_var(self, reg_type, y_var, the_panel=None): """ :param result: is the output of 
self._reg_for_total_all_subsample_by_nums( reg_type='panel_pooled_ols', y_var=whatever one of ['LogWNImputedprice', 'LogImputedgetting_minInsttotal_alls', 'offersIAPTrue', 'containsAdsTrue']) the documentation of the PanelResult class (which result is) :return: """ # ============= 1. extract results info and put them into dicts ================== params_pvalues_dict = dict.fromkeys(self.ssnames.keys()) for name1, content1 in self.ssnames.items(): params_pvalues_dict[name1] = dict.fromkeys(content1) for name2 in content1: # ---------- specify the rows to extract --------------- index_to_extract = { 'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'], 'panel_pooled_ols': [ 'const', name1 + '_' + name2 + '_NicheDummy', 'PostDummy', name1 + '_' + name2 + '_PostXNicheDummy'] } # ---------- getting the coefficients ---------------------- if reg_type == 'cross_section_ols': x = self.reg_results[y_var][the_panel][name1][name2].params else: x = self.reg_results[y_var][name1][name2].params x = x.to_frame() x.columns = ['parameter'] y = x.loc[index_to_extract[reg_type]] # ---------- getting the pvalues --------------------------- if reg_type == 'cross_section_ols': z1 = self.reg_results[y_var][the_panel][name1][name2].pvalues else: z1 = self.reg_results[y_var][name1][name2].pvalues z1 = z1.to_frame() z1.columns = ['pvalue'] z2 = z1.loc[index_to_extract[reg_type]] def _total_allocate_asterisk(v): if 0.05 < v <= 0.1: return '*' elif 0.01 < v <= 0.05: return '**' elif v <= 0.01: return '***' else: return '' z2['asterisk'] = z2['pvalue'].employ(lambda x: _total_allocate_asterisk(x)) y2 = y.join(z2, how='inner') y2['parameter'] = y2['parameter'].value_round(3).totype(str) y2['parameter'] = y2['parameter'] + y2['asterisk'] y2.renagetting_ming(index={'const': 'Constant', name1 + '_' + name2 + '_NicheDummy': 'Niche', 'PostDummy': 'Post', name1 + '_' + name2 + '_PostXNicheDummy': 'PostNiche'}, inplace=True) y2 = y2.reseting_index() y2.sip(columns=['pvalue', 'asterisk'], inplace=True) y2.insert(0, 'Samples', [name1 + '_' + name2] * length(y2.index)) y2['Samples'] = y2['Samples'].employ(lambda x: self.name12_reg_table_names[x] if x in self.name12_reg_table_names.keys() else 'None') y2.renagetting_ming(columns={'index': 'Independent Vars', 'parameter': self.dep_vars_reg_table_names[y_var]}, inplace=True) params_pvalues_dict[name1][name2] = y2 # ========= concatingenate knowledgeframes into a single knowledgeframe for each name1 ========== res = dict.fromkeys(params_pvalues_dict.keys()) for name1, content1 in params_pvalues_dict.items(): kf_list = [] for name12, kf in content1.items(): kf_list.adding(kf) akf = functools.reduce(lambda a, b: a.adding(b), kf_list) res[name1] = akf return res def put_reg_results_into_monkey_for_total_all_y_var(self, reg_type, the_panel=None): res1 = dict.fromkeys(self.total_all_y_reg_vars) if reg_type == 'cross_section_ols': for y in self.total_all_y_reg_vars: res1[y] = self._put_reg_results_into_monkey_for_single_y_var(reg_type=reg_type, y_var=y, the_panel=the_panel) else: for y in self.total_all_y_reg_vars: res1[y] = self._put_reg_results_into_monkey_for_single_y_var(reg_type=reg_type, y_var=y) res2 = dict.fromkeys(self.ssnames.keys()) for name1 in res2.keys(): kf_list = [] for y in self.total_all_y_reg_vars: kf_list.adding(res1[y][name1]) akf = functools.reduce(lambda a, b: a.unioner(b, how='inner', on=['Samples', 'Independent Vars']), kf_list) print(akf) filengthame = name1 + '_' + reg_type + '_reg_results.csv' akf.to_csv(self.des_stats_root / 
self.name1_path_keywords[name1] / 'reg_tables_ready_for_latex' / filengthame) res2[name1] = akf return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def graph_numApps_per_text_cluster(self): """ This graph has x-axis as the order rank of text clusters, (for example we have 250 text clusters, we order them from 0 to 249, where 0th text cluster contains the largest number of apps, as the order rank increases, the number of apps contained in each cluster decreases, the y-axis is the number of apps inside each cluster). Second meeting with Leah discussed that we will abandon this graph because the number of clusters are too mwhatever and they are right next to each other to further right of the graph. """ d = self._numApps_per_cluster() for name1, content1 in d.items(): for name2, content2 in content1.items(): kf3 = content2.reseting_index() kf3.columns = ['cluster_labels', 'Apps Count'] # -------------- plot ---------------------------------------------------------------- fig, ax = plt.subplots() # color the top_n bars # after sort descending, the first n ranked clusters (the number in broad_niche_cutoff) is broad color = ['red'] * self.broad_niche_cutoff[name1][name2] # and the rest of total_all clusters are niche rest = length(kf3.index) - self.broad_niche_cutoff[name1][name2] color.extend(['blue'] * rest) kf3.plot.bar( x='cluster_labels', xlabel='Text Clusters', y='Apps Count', ylabel='Apps Count', ax=ax, color=color) # customize legend BRA = mpatches.Patch(color='red', label='broad apps') NIA = mpatches.Patch(color='blue', label='niche apps') ax.legend(handles=[BRA, NIA], loc='upper right') ax.axes.xaxis.set_ticks([]) ax.yaxis.set_ticks_position('right') ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(False) ax.grid(True) # label the top n clusters kf4 = kf3.iloc[:self.broad_niche_cutoff[name1][name2], ] for index, row in kf4.traversal(): value = value_round(row['Apps Count']) ax.annotate(value, (index, value), xytext=(0, 0.1), # 2 points to the right and 15 points to the top of the point I annotate textcoords='offset points') plt.xlabel("Text Clusters") plt.ylabel('Apps Count') # ------------ set title and save ---------------------------------------- self._set_title_and_save_graphs(fig=fig, file_keywords='numApps_count', name1=name1, name2=name2, # graph_title='Histogram of Apps Count In Each Text Cluster', relevant_folder_name = 'numApps_per_text_cluster') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def graph_numClusters_per_cluster_size_bin(self, combine_clusters): res = self._numClusters_per_cluster_size_bin(combine_clusters) for name1, content1 in res.items(): for name2, kfres in content1.items(): kfres.reseting_index(inplace=True) kfres.columns = ['cluster_size_bin', 'Clusters Count'] fig, ax = plt.subplots() fig.subplots_adjust(bottom=0.3) kfres.plot.bar( x='cluster_size_bin', xlabel = 'Cluster Sizes Bins', y='Clusters Count', ylabel = 'Clusters Count', # default will show no y-label rot=40, # rot is **kwarg rotation for ticks grid=False, # because the default will add x grid, so turn it off first legend=None, # remove legend ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig ) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) 
ax.yaxis.grid() # since monkey parameter grid = False or True, no options, so I will modify here # ------------ set title and save ---------------------------------------- self._set_title_and_save_graphs(fig=fig, file_keywords='numClusters_count', name1=name1, name2=name2, # graph_title='Histogram of Clusters In Each Cluster Size Bin', relevant_folder_name='numClusters_per_cluster_size_bin') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def graph_numApps_per_cluster_size_bin(self, combine_clusters): res = self._numApps_per_cluster_size_bin(combine_clusters) for name1, content1 in res.items(): for name2, kfres in content1.items(): kfres.reseting_index(inplace=True) kfres.columns = ['cluster_size_bin', 'numApps_in_cluster_size_bin'] fig, ax = plt.subplots() fig.subplots_adjust(bottom=0.3) kfres.plot.bar( x='cluster_size_bin', xlabel = 'Cluster Size Bins', y='numApps_in_cluster_size_bin', ylabel = 'Apps Count', # default will show no y-label rot=40, # rot is **kwarg rotation for ticks grid=False, # because the default will add x grid, so turn it off first legend=None, # remove legend ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig ) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.yaxis.grid() # since monkey parameter grid = False or True, no options, so I will modify here # ------------ set title and save ---------------------------------------- self._set_title_and_save_graphs(fig=fig, file_keywords='numApps_per_cluster_size_bin', name1=name1, name2=name2, # graph_title='Histogram of Apps Count In Each Cluster Size Bin', relevant_folder_name='numApps_per_cluster_size_bin') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def _grouper_subsample_by_num_kfs_by_nichedummy(self): d = self._slice_subsample_by_nums_dict() res = dict.fromkeys(self.ssnames.keys()) for name1, content1 in d.items(): res[name1] = dict.fromkeys(content1.keys()) for name2, kf in content1.items(): niche_dummy = name1 + '_' + name2 + '_NicheDummy' kf2 = kf.grouper([niche_dummy]).size().to_frame() kf2.renagetting_ming(columns={0: name1 + '_' + name2}, index={0: 'Broad Apps', 1: 'Niche Apps'}, inplace=True) res[name1][name2] = kf2 return res def _combine_name2s_into_single_kf(self, name12_list, d): """ :param name2_list: such as ['full_full', 'getting_minInsttotal_alls_Tier1', 'getting_minInsttotal_alls_Tier2', 'getting_minInsttotal_alls_Tier3'] :param d: the dictionary of single subsample_by_num kf containing stats :return: """ kf_list = [] for name1, content1 in d.items(): for name2, kf in content1.items(): name12 = name1 + '_' + name2 if name12 in name12_list: kf_list.adding(kf) kf2 = functools.reduce(lambda a, b: a.join(b, how='inner'), kf_list) l = kf2.columns.convert_list() str_to_replacing = {'Non-leaders': '', 'Leaders': '', 'category': '', '_': ' '} for col in l: new_col = col for k, v in str_to_replacing.items(): new_col = new_col.replacing(k, v) new_col = new_col.title() kf2.renagetting_ming(columns={col: new_col}, inplace=True) kf2.loc["Total"] = kf2.total_sum(axis=0) kf2 = kf2.sort_the_values(by='Total', axis=1, ascending=False) kf2 = kf2.sip(labels='Total') kf2 = kf2.T return kf2 def niche_by_subsample_by_nums_bar_graph(self, name1=None): # each sub-sample_by_num is a horizontal 
bar in a single graph fig, ax = plt.subplots(figsize=(8, 5)) fig.subplots_adjust(left=0.2) # ------------------------------------------------------------------------- res = self._grouper_subsample_by_num_kfs_by_nichedummy() kf = self._combine_name2s_into_single_kf(name12_list=self.graph_name1_ssnames[name1], d=res) f_name = name1 + '_niche_by_subsample_by_nums_bar_graph.csv' if name1 == 'Leaders': q = self.des_stats_leaders_tables / f_name else: q = self.des_stats_non_leaders_tables / f_name kf.to_csv(q) # ------------------------------------------------------------------------- kf.plot.barh(stacked=True, color={"Broad Apps": "orangered", "Niche Apps": "lightsalmon"}, ax=ax) ax.set_ylabel('Sub-sample_by_nums') ax.set_yticklabels(ax.getting_yticklabels()) ax.set_xlabel('Apps Count') ax.xaxis.grid() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) # graph_title = self.initial_panel + ' ' + self.graph_name1_titles[name1] + \ # '\n Apps Count by Niche and Broad Types' # ax.set_title(graph_title) ax.legend() # ------------------ save file ----------------------------------------------------------------- self._set_title_and_save_graphs(fig=fig, name1=name1, file_keywords=self.graph_name1_titles[name1].lower().replacing(' ', '_'), relevant_folder_name='nichedummy_count_by_subgroup') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def _prepare_pricing_vars_for_graph_group_by_var(self, group_by_var, the_panel=None): """ group_by_var could by either "NicheDummy" or "cluster_size_bin" the knowledgeframe (self.ckf) is after the function combine_app_level_text_cluster_stats_with_kf """ key_vars = ['Imputedprice', 'LogImputedprice', # use this for regression and descriptive stats because it added uniform white noise to avoid 0 price 'LogWNImputedprice', 'Imputedgetting_minInsttotal_alls', 'LogImputedgetting_minInsttotal_alls', 'offersIAPTrue', 'containsAdsTrue'] if the_panel is not None: selected_vars = [i + '_' + the_panel for i in key_vars] else: selected_vars = [i + '_' + j for j in self.total_all_panels for i in key_vars] d = self._slice_subsample_by_nums_dict() res12 = dict.fromkeys(self.ssnames.keys()) res34 = dict.fromkeys(self.ssnames.keys()) for name1, content1 in d.items(): res12[name1] = dict.fromkeys(content1.keys()) res34[name1] = dict.fromkeys(content1.keys()) for name2, kf in content1.items(): # ---- prepare regular kf with log transformed imputedprice and imputed getting_mininsttotal_alls -------- text_label_var = name1 + '_' + name2 + '_kaverages_labels' numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster' group_by_var_name = name1 + '_' + name2 + '_' + group_by_var # ------------------------------------------------------------------------------------------ svars = selected_vars + [text_label_var, group_by_var_name, numApps_in_cluster] kf2 = kf[svars] # change niche 0 1 to Broad and Niche for clearer table and graphing if group_by_var == 'NicheDummy': kf2.loc[kf2[group_by_var_name] == 1, group_by_var_name] = 'Niche' kf2.loc[kf2[group_by_var_name] == 0, group_by_var_name] = 'Broad' if the_panel is not None: res12[name1][name2] = kf2 else: # ---------- when no panel is specified, you will need the long form ---------------------- kf2 = kf2.reseting_index() lkf = mk.wide_to_long( kf2, stubnames=key_vars, i=['index'], j="panel", sep='_').reseting_index() lkf["panel"] = mk.convert_datetime(lkf["panel"], 
formating='%Y%m') lkf["panel"] = lkf["panel"].dt.strftime('%Y-%m') lkf = lkf.sort_the_values(by=["index", "panel"]).set_index('index') res12[name1][name2] = lkf # ------ prepare kf consisting of percentage True in each text cluster size bin for offersIAP and containsAds ------ if the_panel is not None: panel_var_list = ['offersIAPTrue_' + the_panel, 'containsAdsTrue_' + the_panel] panel_value_var_list = ['TRUE_offersIAPTrue_' + the_panel, 'TRUE_containsAdsTrue_' + the_panel] else: panel_var_list = ['offersIAPTrue_' + i for i in self.total_all_panels] + \ ['containsAdsTrue_' + i for i in self.total_all_panels] panel_value_var_list = ['TRUE_offersIAPTrue_' + i for i in self.total_all_panels] + \ ['TRUE_containsAdsTrue_' + i for i in self.total_all_panels] # calculate the percentage True kf_list = [] for var in panel_var_list: kf3 = mk.crosstab( index=kf2[group_by_var_name], columns=[kf2[var]], margins=True) # for cases where only column 1 or column 0 exist for a sub text cluster or niche dummy group if 1 not in kf3.columns: print(name1, name2, the_panel, var, 'column 1 does not exist.') kf3[1] = 0 print('created column 1 with zeros. ') if 0 not in kf3.columns: print(name1, name2, the_panel, var, 'column 0 does not exist.') kf3[0] = 0 print('created column 0 with zeros. ') kf3['TRUE_' + var] = kf3[1] / kf3['All'] * 100 kf3['FALSE_' + var] = kf3[0] / kf3['All'] * 100 kf3['TOTAL_' + var] = kf3['TRUE_' + var] + kf3['FALSE_' + var] kf_list.adding(kf3[['TRUE_' + var]]) kf4 = functools.reduce(lambda a, b: a.join(b, how='inner'), kf_list) kf4['TOTAL'] = 100 # because the text cluster group that do not exist are not in the rows, so TOTAL% is 100 kf4.sip(index='All', inplace=True) total = kf2.grouper(group_by_var_name)[var].count().to_frame() total.renagetting_ming(columns={var: 'Total_Count'}, inplace=True) kf5 = total.join(kf4, how='left').fillnone(0) kf5.sip(columns='Total_Count', inplace=True) kf5.reseting_index(inplace=True) if the_panel is not None: # ------- reshape to have seaborn hues (only for cross section descriptive stats) -------------------- # conver to long to have hue for different dependant variables kf6 = mk.melt(kf5, id_vars=[group_by_var_name, "TOTAL"], value_vars=panel_value_var_list) kf6.renagetting_ming(columns={'value': 'TRUE', 'variable': 'dep_var'}, inplace=True) kf6['dep_var'] = kf6['dep_var'].str.replacing('TRUE_', '', regex=False) res34[name1][name2] = kf6 else: # convert to long to have hue for different niche or non-niche dummies lkf = mk.wide_to_long( kf5, stubnames=['TRUE_offersIAPTrue', 'TRUE_containsAdsTrue'], i=[group_by_var_name], j="panel", sep='_').reseting_index() lkf["panel"] = mk.convert_datetime(lkf["panel"], formating='%Y%m') lkf["panel"] = lkf["panel"].dt.strftime('%Y-%m') lkf = lkf.sort_the_values(by=["panel"]) res34[name1][name2] = lkf return res12, res34 def graph_histogram_pricing_vars_by_niche(self, name1, the_panel): res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var( group_by_var='NicheDummy', the_panel=the_panel) key_vars = ['LogImputedprice', 'Imputedprice', 'LogWNImputedprice', 'LogImputedgetting_minInsttotal_alls', 'Imputedgetting_minInsttotal_alls'] # --------------------------------------- graph ------------------------------------------------- for i in range(length(key_vars)): fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15, 10), sharey='row', sharex='col') fig.subplots_adjust(bottom=0.2) name2_l = self.ssnames[name1] # for kf names name2 only name12_l = self.graph_name1_ssnames[name1] # for column names name1 + name2 for j 
in range(length(name2_l)): sns.set(style="whitegrid") sns.despine(right=True, top=True) sns.histplot(data=res12[name1][name2_l[j]], x=key_vars[i] + "_" + the_panel, hue=name12_l[j] + '_NicheDummy', ax=ax.flat[j]) sns.despine(right=True, top=True) graph_title = self.name12_graph_title_dict[name12_l[j]] ax.flat[j].set_title(graph_title) ax.flat[j].set_ylabel(self.graph_dep_vars_ylabels[key_vars[i]]) ax.flat[j].xaxis.set_visible(True) ax.flat[j].legend().set_visible(False) fig.legend(labels=['Niche App : Yes', 'Niche App : No'], loc='lower right', ncol=2) # ------------ set title and save --------------------------------------------- self._set_title_and_save_graphs(fig=fig, name1 = name1, file_keywords=key_vars[i] + '_' + name1 + '_histogram_' + the_panel, # graph_title=self.graph_name1_titles[name1] + \ # ' Cross Section Histogram of \n' + \ # self.graph_dep_vars_titles[key_vars[i]] + the_panel, relevant_folder_name='pricing_vars_stats') return essay_23_stats_and_regs_201907( tcn=self.tcn, combined_kf=self.ckf, broad_niche_cutoff=self.broad_niche_cutoff, broadDummy_labels=self.broadDummy_labels, reg_results=self.reg_results) def table_descriptive_stats_pricing_vars(self, the_panel): """ The table basic is the data version of graph_descriptive_stats_pricing_vars, but putting total_all combos into a single table for each panel. """ for grouper_var in ['cluster_size_bin', 'NicheDummy']: res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var( group_by_var=grouper_var, the_panel=the_panel) total_kf = [] total_keys = [] for name1, value1 in res12.items(): lkf = [] keys_lkf = [] for name2, value2 in value1.items(): grouper_var2 = name1 + '_' + name2 + '_' + grouper_var kf = value2.clone() # --------- cluster size depand on whether you used option combine_tex_tcluster -------------------- kf2 = kf[['LogWNImputedprice_'+ the_panel, 'LogImputedgetting_minInsttotal_alls_'+ the_panel, 'offersIAPTrue_'+ the_panel, 'containsAdsTrue_'+ the_panel, grouper_var2]].grouper(grouper_var2).describe() lkf.adding(kf2) keys_lkf.adding(name2) kf4 =
mk.concating(lkf, keys=keys_lkf)
pandas.concat
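A minimal sketch, in standard pandas, of the pattern this row's completion targets (the api column maps the corpus alias mk.concating to pandas.concat): per-subsample describe() tables stacked under keys into one multi-index summary. The sub-sample names and column names here are invented for illustration, not taken from the row's data.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)

frames, keys = [], []
for name in ["full", "tier1"]:                      # placeholder sub-sample names
    df = pd.DataFrame({
        "LogPrice": rng.normal(size=50),
        "NicheDummy": rng.integers(0, 2, size=50),
    })
    # per-group descriptive stats, as the row above builds for each name2
    frames.append(df.groupby("NicheDummy")["LogPrice"].describe())
    keys.append(name)

# stack the per-sub-sample tables under a two-level index
summary = pd.concat(frames, keys=keys)
print(summary)

The keys argument is what keeps each sub-sample's block identifiable after concatenation, which is the point of passing keys_lkf in the completion.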
from __future__ import divisionision ''' NeuroLearn Statistics Tools =========================== Tools to help with statistical analyses. ''' __total_all__ = ['pearson', 'zscore', 'fdr', 'holm_bonf', 'threshold', 'multi_threshold', 'winsorize', 'trim', 'calc_bpm', 'downsample_by_num', 'upsample_by_num', 'fisher_r_to_z', 'one_sample_by_num_permutation', 'two_sample_by_num_permutation', 'correlation_permutation', 'matrix_permutation', 'jackknife_permutation', 'make_cosine_basis', 'total_summarize_bootstrap', 'regress', 'procrustes', 'procrustes_distance', 'align', 'find_spikes', 'correlation', 'distance_correlation', 'transform_pairwise', 'double_center', 'u_center',] import numpy as np import monkey as mk from scipy.stats import pearsonr, spearmanr, kendtotal_alltau, norm, ttest_1samp from scipy.stats import t as t_dist from scipy.spatial.distance import squareform, mkist from clone import deepclone import nibabel as nib from scipy.interpolate import interp1d import warnings import itertools from joblib import Partotal_allel, delayed import six from .utils import attempt_to_import, check_square_numpy_matrix from .external.srm import SRM, DetSRM from scipy.linalg import orthogonal_procrustes from scipy.spatial import procrustes as procrust from scipy.ndimage import label, generate_binary_structure from sklearn.utils import check_random_state from sklearn.metrics import pairwise_distances MAX_INT = np.iinfo(np.int32).getting_max # Optional dependencies sm = attempt_to_import('statsmodels.tsa.arima_model', name='sm') def pearson(x, y): """ Correlates row vector x with each row vector in 2D array y. From neurosynth.stats.py - author: <NAME> """ data = np.vstack((x, y)) ms = data.average(axis=1)[(slice(None, None, None), None)] datam = data - ms datass = np.sqrt(np.total_sum(datam*datam, axis=1)) # datass = np.sqrt(ss(datam, axis=1)) temp = np.dot(datam[1:], datam[0].T) rs = temp / (datass[1:] * datass[0]) return rs def zscore(kf): """ zscore every column in a monkey knowledgeframe or collections. Args: kf: (mk.KnowledgeFrame) Monkey KnowledgeFrame instance Returns: z_data: (mk.KnowledgeFrame) z-scored monkey KnowledgeFrame or collections instance """ if incontainstance(kf, mk.KnowledgeFrame): return kf.employ(lambda x: (x - x.average())/x.standard()) elif incontainstance(kf, mk.Collections): return (kf-np.average(kf))/np.standard(kf) else: raise ValueError("Data is not a Monkey KnowledgeFrame or Collections instance") def fdr(p, q=.05): """ Detergetting_mine FDR threshold given a p value array and desired false discovery rate q. Written by <NAME> Args: p: (np.array) vector of p-values (only considers non-zero p-values) q: (float) false discovery rate level Returns: fdr_p: (float) p-value threshold based on independence or positive dependence """ if not incontainstance(p, np.ndarray): raise ValueError('Make sure vector of p-values is a numpy array') s = np.sort(p) nvox = p.shape[0] null = np.array(range(1, nvox + 1), dtype='float') * q / nvox below = np.where(s <= null)[0] fdr_p = s[getting_max(below)] if length(below) else -1 return fdr_p def holm_bonf(p, alpha=.05): """ Compute corrected p-values based on the Holm-Bonferroni method, i.e. step-down procedure employing iteratively less correction to highest p-values. A bit more conservative than fdr, but much more powerful thanvanilla bonferroni. 
Args: p: (np.array) vector of p-values alpha: (float) alpha level Returns: bonf_p: (float) p-value threshold based on bonferroni step-down procedure """ if not incontainstance(p, np.ndarray): raise ValueError('Make sure vector of p-values is a numpy array') s = np.sort(p) nvox = p.shape[0] null = .05 / (nvox - np.arange(1, nvox + 1) + 1) below = np.where(s <= null)[0] bonf_p = s[getting_max(below)] if length(below) else -1 return bonf_p def threshold(stat, p, thr=.05, return_mask=False): """ Threshold test image by p-value from p image Args: stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric (e.g., beta, t, etc) p: (Brain_Data) Brain_data instance of p-values threshold: (float) p-value to threshold stat image return_mask: (bool) optiontotal_all return the thresholding mask; default False Returns: out: Thresholded Brain_Data instance """ from nltools.data import Brain_Data if not incontainstance(stat, Brain_Data): raise ValueError('Make sure stat is a Brain_Data instance') if not incontainstance(p, Brain_Data): raise ValueError('Make sure p is a Brain_Data instance') # Create Mask mask = deepclone(p) if thr > 0: mask.data = (mask.data < thr).totype(int) else: mask.data = np.zeros(length(mask.data), dtype=int) # Apply Threshold Mask out = deepclone(stat) if np.total_sum(mask.data) > 0: out = out.employ_mask(mask) out.data = out.data.squeeze() else: out.data = np.zeros(length(mask.data), dtype=int) if return_mask: return out, mask else: return out def multi_threshold(t_mapping, p_mapping, thresh): """ Threshold test image by multiple p-value from p image Args: stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric (e.g., beta, t, etc) p: (Brain_Data) Brain_data instance of p-values threshold: (list) list of p-values to threshold stat image Returns: out: Thresholded Brain_Data instance """ from nltools.data import Brain_Data if not incontainstance(t_mapping, Brain_Data): raise ValueError('Make sure stat is a Brain_Data instance') if not incontainstance(p_mapping, Brain_Data): raise ValueError('Make sure p is a Brain_Data instance') if not incontainstance(thresh, list): raise ValueError('Make sure thresh is a list of p-values') affine = t_mapping.to_nifti().getting_affine() pos_out = np.zeros(t_mapping.to_nifti().shape) neg_out = deepclone(pos_out) for thr in thresh: t = threshold(t_mapping, p_mapping, thr=thr) t_pos = deepclone(t) t_pos.data = np.zeros(length(t_pos.data)) t_neg = deepclone(t_pos) t_pos.data[t.data > 0] = 1 t_neg.data[t.data < 0] = 1 pos_out = pos_out+t_pos.to_nifti().getting_data() neg_out = neg_out+t_neg.to_nifti().getting_data() pos_out = pos_out + neg_out*-1 return Brain_Data(nib.Nifti1Image(pos_out, affine)) def winsorize(data, cutoff=None, replacing_with_cutoff=True): ''' Winsorize a Monkey KnowledgeFrame or Collections with the largest/lowest value not considered outlier Args: data: (mk.KnowledgeFrame, mk.Collections) data to winsorize cutoff: (dict) a dictionary with keys {'standard':[low,high]} or {'quantile':[low,high]} replacing_with_cutoff: (bool) If True, replacing outliers with cutoff. 
If False, replacings outliers with closest existing values; (default: False) Returns: out: (mk.KnowledgeFrame, mk.Collections) winsorized data ''' return _transform_outliers(data, cutoff, replacing_with_cutoff=replacing_with_cutoff, method='winsorize') def trim(data, cutoff=None): ''' Trim a Monkey KnowledgeFrame or Collections by replacing outlier values with NaNs Args: data: (mk.KnowledgeFrame, mk.Collections) data to trim cutoff: (dict) a dictionary with keys {'standard':[low,high]} or {'quantile':[low,high]} Returns: out: (mk.KnowledgeFrame, mk.Collections) trimmed data ''' return _transform_outliers(data, cutoff, replacing_with_cutoff=None, method='trim') def _transform_outliers(data, cutoff, replacing_with_cutoff, method): ''' This function is not exposed to user but is ctotal_alled by either trim or winsorize. Args: data: (mk.KnowledgeFrame, mk.Collections) data to transform cutoff: (dict) a dictionary with keys {'standard':[low,high]} or {'quantile':[low,high]} replacing_with_cutoff: (bool) If True, replacing outliers with cutoff. If False, replacings outliers with closest existing values. (default: False) method: 'winsorize' or 'trim' Returns: out: (mk.KnowledgeFrame, mk.Collections) transformed data ''' kf = data.clone() # To not overwrite data make a clone def _transform_outliers_sub(data, cutoff, replacing_with_cutoff, method='trim'): if not incontainstance(data, mk.Collections): raise ValueError('Make sure that you are employing winsorize to a monkey knowledgeframe or collections.') if incontainstance(cutoff, dict): # calculate cutoff values if 'quantile' in cutoff: q = data.quantile(cutoff['quantile']) elif 'standard' in cutoff: standard = [data.average()-data.standard()*cutoff['standard'][0], data.average()+data.standard()*cutoff['standard'][1]] q =
mk.Collections(index=cutoff['standard'], data=standard)
pandas.Series
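A short sketch of the quantile branch of the winsorize/trim logic described above, using clip/where on a plain pandas Series. This is not the module's exact code path, only the same idea under the same cutoff dictionary shape ({'quantile': [low, high]}).

import numpy as np
import pandas as pd

s = pd.Series(np.concatenate([np.random.default_rng(1).normal(size=98),
                              [15.0, -12.0]]))        # two obvious outliers

cutoff = {"quantile": [0.05, 0.95]}
low, high = s.quantile(cutoff["quantile"])

winsorized = s.clip(lower=low, upper=high)   # replace outliers with the cutoff values
trimmed = s.where(s.between(low, high))      # replace outliers with NaN instead

print(winsorized.describe())
print(int(trimmed.isna().sum()), "values trimmed")

clip corresponds to replacing with the cutoff, where(...) to trimming to NaN, mirroring the replacing_with_cutoff switch in the docstrings above.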
# -*- coding: utf-8 -*- """ Created on Wed Oct 27 01:31:54 2021 @author: yoonseok """ import os import monkey as mk from tqdm import tqdm from scipy.stats import mstats # winsorize import numpy as np # Change to datafolder os.chdir(r"C:\data\car\\") # ๊ธฐ๋ณธ ํ…Œ์ด๋ธ” ์ž…๋ ฅ kf = mk.read_csv("knowledgeframe_h1.txt") del kf["Unnamed: 0"] kf = kf.sipna(subset=["8"]) # ๊ณต์‹œ์ผ์ž ์ถ”์ถœ kf["date"] = [x[0:10].replacing(".","") for x in kf["6"]] # ์—ฐ๋„ ์ž…๋ ฅ kf["year"] = [int(x[1:5]) for x in kf["5"]] # Key ์ฝ”๋”ฉ carKey = [] for number in range(length(kf)): carKey.adding(str(kf.iloc[number,6].totype(int)) + str(kf.iloc[number,17])) key = [] for i in carKey: key.adding(int(i)) kf["carKey"] = key # ์ด์ต๊ณต์‹œ์ผ ์ž๋ฃŒ ์ž…๋ ฅ kf2 = mk.read_csv("car_2_earningsAccouncementDate.csv") del kf2["Unnamed: 0"] kf['dateE'] = kf['carKey'].mapping(kf2.set_index("carKey")['rcept_dt']) kf = kf.sipna(subset=["dateE"]) date = [] for i in kf["dateE"]: # ์ด์ต๊ณต์‹œ ๋ˆ„์ ์ดˆ๊ณผ์ˆ˜์ต๋ฅ ์€ [-1,1] ์ด๋ฏ€๋กœ ๋งคํ•‘ ๋‚ ์งœ๋ฅผ ํ•˜๋ฃจ ์ „๋‚ ๋กœ ๋ฐ”๊พผ๋‹ค if str(i)[4:8] == "0201": # 1์›” 2์ผ๊ณผ 3์›” 2์ผ i = int(str(i)[0:4] + "0131") else: i = int(i) -1 date.adding(int(i)) kf["dateE"] = date # car ์ฝ”๋”ฉ car = [] for number in range(length(kf)): car.adding(str(kf.iloc[number,16]) + str(kf.iloc[number,6].totype(int))) key = [] for i in car: key.adding(int(i)) kf["car"] = key # car_e ์ฝ”๋”ฉ car_e = [] for number in range(length(kf)): car_e.adding(str(kf.iloc[number,19]) + str(kf.iloc[number,6].totype(int))) key = [] for i in car_e: key.adding(int(i)) kf["car_e"] = key # CAR ์ž‘์—… ํด๋”๋กœ ๋ณ€๊ฒฝ os.chdir("C:\data\stockinfo\car\\") # ์ž‘์—… ํด๋”๋กœ ๋ณ€๊ฒฝ # CAR ๊ณ„์‚ฐ๋œ ์‹œํŠธ ์ „์ฒด ์ทจํ•ฉ year = 1999 CAR = mk.read_csv("CAR_" + str(year) +".csv", usecols=[2, 3, 5, 14, 15], dtype=str) for year in tqdm(range(0, 21)): CAR2 = mk.read_csv("CAR_" + str(2000 + year) +".csv", usecols=[2, 3, 5, 14, 15], dtype=str) CAR = mk.concating([CAR, CAR2]) CAR = CAR.sort_the_values(by=["0", "date"]) key = [] for i in tqdm(CAR["match"]): try: key.adding(int(i)) except ValueError: key.adding('') CAR["match"] = key CAR = CAR.sipna(subset=["CAR[0,2]_it"]) CAR = CAR.replacing(r'^\s*$', np.nan, regex=True) CAR = CAR.sipna(subset=["match"]) CAR = CAR.sip_duplicates(subset=["match"]) # CAR ์ฒ˜๋ฆฌ kf['car_val'] = kf['car'].mapping(CAR.set_index("match")['CAR[0,2]_it']) kf['car_e_val'] = kf['car_e'].mapping(CAR.set_index("match")['CAR[0,2]_it']) kf = kf.sipna(subset=["car_val", "car_e_val"]) # fileLate ๊ณ„์‚ฐ ์ค€๋น„ ## ์ „๊ธฐ๋ง ๋ณ„๋„ ์ž์‚ฐ์ด๊ณ„ ์ž…๋ ฅ asset_prev = mk.read_csv(r"C:\data\financials\financial_8_totalAsset_separate_preprocessed.txt") asset_prev = asset_prev.sip_duplicates(subset=["assetKey"]) ## AssetKey ์ƒ์„ฑ assetKey = [] for entry in kf["key"]: key = entry[22:] assetKey.adding(key) kf["assetKey"] = assetKey ## ์ „๊ธฐ๋ง ๋ณ„๋„ ์ž์‚ฐ์ด๊ณ„ ๋งคํ•‘ kf['asset_py'] = kf['assetKey'].mapping(asset_prev.set_index("assetKey")['asset']) kf = kf.sipna(subset=['asset_py']) ## 2์กฐ ์ด์ƒ ํ‘œ์‹œ kf["large"] = [1 if x >= 2000000000000 else 0 for x in kf["asset_py"]] # ์œ ์‚ฌ๋„(SCORE^A) ์‚ฐ์ถœ๊ฐ’ DF ๋ณ€ํ™˜ score = mk.read_csv(r"C:\data\h1.score.count.txt") del score["Unnamed..0"] del score["X"] # ์ด์ž์‚ฐ DF ๋ณ€ํ™˜ asset = mk.read_csv(r"C:\data\financials\financial_1_totalAsset_preprocessed.txt") # ์ž…์ˆ˜ ๊ฐ์‚ฌ๋ณด๊ณ ์„œ ์ •๋ณด DF ๋ณ€ํ™˜ auditor = mk.read_csv(r"C:\data\financials\auditReport_1_auditor_preprocessed.txt") del auditor["Unnamed: 0"] gaap = mk.read_csv(r"C:\data\financials\auditReport_2_gaap_preprocessed.txt") del 
gaap["Unnamed: 0"] # Merge DF result = mk.unioner(kf, score, how="inner", on=["key"]) result =
mk.unioner(result, asset[["key", "asset"]], how="inner", on=["key"])
pandas.merge
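The CAR script above repeatedly attaches lookup columns with key.map(other.set_index(key)[col]) and then joins tables with an inner merge (the api column maps mk.unioner to pandas.merge). A self-contained sketch of that pattern with made-up keys and a purely illustrative asset threshold:

import pandas as pd

firms = pd.DataFrame({"key": [1, 2, 3], "year": [2019, 2019, 2020]})
car = pd.DataFrame({"key": [1, 2, 4], "CAR[0,2]_it": [0.03, -0.01, 0.05]})
assets = pd.DataFrame({"key": [1, 2, 3], "asset": [1.2e12, 3.4e12, 9.0e11]})

# dictionary-style lookup, as the script does with .map(...set_index(...))
firms["car_val"] = firms["key"].map(car.set_index("key")["CAR[0,2]_it"])
firms = firms.dropna(subset=["car_val"])

# inner merge to attach total assets, keeping only matched keys
result = pd.merge(firms, assets[["key", "asset"]], how="inner", on=["key"])

# large-firm dummy (the 2e12 threshold mirrors the script's comment, values are fake)
result["large"] = (result["asset"] >= 2e12).astype(int)
print(result)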
import logging l = logging.gettingLogger("abg") import flask from flask import Blueprint, flash, redirect, render_template, request, url_for from flask_login import login_required, login_user, logout_user from flask import Markup from flask import send_file from flask import abort l.error("flask") from abg_stats.extensions import login_manager from abg_stats.public.forms import LoginForm from abg_stats.user.forms import RegisterForm from abg_stats.user.models import User from abg_stats.utils import flash_errors l.error("abg_stats") import os import matplotlib matplotlib.use('agg') l.error("matplot") import monkey as mk l.error("Monkey import") import matplotlib.pyplot as plt import numpy as np l.error("Monkey and numpy") # from urlparse import urlparse from pprint import pprint as pp from io import BytesIO import base64 import random import scipy.stats as stats import scipy from monkey_highcharts.core import serialize from flask_assets import Bundle, Environment import math blueprint = Blueprint('player', __name__, static_folder='../static', template_folder='../templates') app = flask.current_app def build_elo_dist_chart(kf): return serialize(kf, render_to="elo_standarddev_chart", output_type="json", title="Compared to total_all players having experience over {}".formating(app.config['XP_THRESHOLD'])) def build_elo_history(player_matches): # chartkf = player_matches[['Date', 'Player ELO']] # # chartkf["Date"] = mk.DatetimeIndex(chartkf["Date"]).totype(int) / 1000 / 1000 # chartkf.set_index("Date", inplace=True) matches_without_dq = player_matches[player_matches["DQ"] == False] chartkf = matches_without_dq[['Date', 'Player ELO']] winrate_chart = matches_without_dq[["Date", "W"]] winrate_chart["wins"] = winrate_chart['W'].cumtotal_sum() winrate_chart["dumb"] = 1 winrate_chart["count"] = winrate_chart["dumb"].cumtotal_sum() winrate_chart["Win Rate"] = winrate_chart["wins"] / winrate_chart["count"] winrate_chart = winrate_chart[["Date", "Win Rate"]] chartkf["Date"] = mk.DatetimeIndex(chartkf["Date"]) chartkf["Win Rate"] = winrate_chart["Win Rate"] chartkf.set_index("Date", inplace=True) z = chartkf.resample_by_num('w').average() z = z.fillnone(method='bfill') z["Player ELO"] = z["Player ELO"].mapping(lambda x: value_round(x)) z["Win Rate"] = z["Win Rate"].mapping(lambda x: value_round(x * 100)) z.columns = ["ELO", "Win Rate"] #pp(chartkf.index) #grouped = mk.grouper(chartkf,by=[chartkf.index.month,chartkf.index.year])["Player ELO"].average() #chartkf["Player_ELO_rolling"] = mk.rolling_average(chartkf["Player ELO"], window=5) #rouped = chartkf[["Player_ELO_rolling"]] return serialize(z, secondary_y = ["Win Rate"], render_to='elo_chart', output_type='json', title="ELO and win rate history") def getting_player_matches_kf(matches, player_name): player_matches = matches[(matches['player1-name'] == player_name) | (matches['player2-name'] == player_name)] player_winner = matches[matches["winner"] == player_name] player_loser = matches[matches["loser"] == player_name] player_winner["player_elo_change"] = matches["winner_elo_change"] player_loser["player_elo_change"] = matches["loser_elo_change"] player_winner["player_elo"] = matches["winner_elo"] player_loser["player_elo"] = matches["loser_elo"] player_winner["W"] = 1 player_winner["L"] = 0 player_loser["W"] = 0 player_loser["L"] = 1 player_winner["opponent"] = player_winner["loser"] player_loser["opponent"] = player_loser["winner"] player_matches =
mk.concating([player_winner, player_loser])
pandas.concat
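A compact sketch of the ELO-history preparation above: cumulative win rate plus a weekly resample for charting. The dates and ratings are fabricated; only the cumsum/resample/bfill pattern is taken from the row.

import numpy as np
import pandas as pd

matches = pd.DataFrame({
    "Date": pd.date_range("2021-01-01", periods=8, freq="3D"),
    "Player ELO": [1000, 1012, 1004, 1020, 1031, 1025, 1040, 1052],
    "W": [1, 1, 0, 1, 1, 0, 1, 1],
})

# cumulative win rate over match history, as build_elo_history computes with cumsum()
matches["Win Rate"] = matches["W"].cumsum() / np.arange(1, len(matches) + 1)

chart = matches.set_index("Date")[["Player ELO", "Win Rate"]]
weekly = chart.resample("W").mean().bfill()      # weekly buckets, back-fill empty weeks
weekly["Win Rate"] = (weekly["Win Rate"] * 100).round()
print(weekly)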
import re import os import monkey as mk import numpy as np import matplotlib.pyplot as plt import monkey as mk import seaborn as sns import statsmodels.api as sa import statsmodels.formula.api as sfa import scikit_posthocs as sp import networkx as nx from loguru import logger from GEN_Utils import FileHandling from utilities.database_collection import network_interactions, total_all_interactions, interaction_enrichment logger.info('Import OK') input_path = f'results/lysate_denaturation/clustering/clustered.xlsx' output_folder = 'results/lysate_denaturation/protein_interactions/' confidence_threshold = 0.7 if not os.path.exists(output_folder): os.makedirs(output_folder) # ------------------------------Read in clustered data------------------------------ # Read in standard components - hits & backgvalue_round proteins = mk.read_excel(f'{input_path}', sheet_name='total_summary') proteins = proteins.sip([col for col in proteins.columns.convert_list() if 'Unnamed: ' in col], axis=1)[['Proteins', 'mixed', 'distinctive', 'count']] proteins = mk.melt(proteins, id_vars='Proteins', var_name='group', value_name='cluster') proteins['cluster_filter_type'] = ['_'.join([var, str(val)]) for var, val in proteins[['group', 'cluster']].values] cluster_total_summary = proteins.grouper('cluster_filter_type').count()['Proteins'].reseting_index() # Test 1: Get intra-cluster interactions (i.e. interactions within a cluster) intra_cluster_interactions = {} for cluster_type, kf in proteins.grouper('cluster_filter_type'): gene_ids = kf['Proteins'].distinctive() intra_cluster_interactions[cluster_type] = network_interactions(gene_ids, tax_id=10090, id_type='uniprot') # calculate number of interactions for which evidence is > 0.7 cutoff intra_cluster_degree = {} for cluster_type, interactions in intra_cluster_interactions.items(): filtered_ints = interactions[interactions['score'].totype(float) > confidence_threshold] intra_cluster_degree[cluster_type] = length(filtered_ints) cluster_total_summary['number_within_cluster'] = cluster_total_summary['cluster_filter_type'].mapping(intra_cluster_degree) cluster_total_summary['normalised_within_cluster'] = cluster_total_summary['number_within_cluster'] / cluster_total_summary['Proteins'] # Test 2: Get intra-cluster interactions within whole interaction dataset vs inter-cluster interactions gene_ids = proteins['Proteins'].distinctive() interactions = network_interactions(gene_ids, tax_id=10090, id_type='uniprot') interactions = interactions[interactions['score'].totype(float) > confidence_threshold] # less than half remain! # calculate number of interactions for which evidence is > 0.7 cutoff inter_vs_intra = {} for cluster_type, kf in proteins.grouper('cluster_filter_type'): gene_ids = kf['Proteins'].distinctive() cluster_ints = interactions.clone() cluster_ints['int_A'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_A']] cluster_ints['int_B'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_B']] cluster_ints['int_type'] = cluster_ints['int_A'] + cluster_ints['int_B'] inter_vs_intra[cluster_type] = cluster_ints['int_type'].counts_value_num() inter_vs_intra = mk.KnowledgeFrame(inter_vs_intra).T.reseting_index() inter_vs_intra.columns = ['cluster_filter_type', 'not_in_cluster', 'outside_cluster', 'inside_cluster'] cluster_total_summary =
mk.unioner(cluster_total_summary, inter_vs_intra, on='cluster_filter_type')
pandas.merge
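A minimal sketch of the interaction-counting step above: filter edges by a confidence threshold, count them per cluster, and merge the counts back onto a summary frame (mk.unioner maps to pandas.merge). Cluster labels and counts are invented.

import pandas as pd

interactions = pd.DataFrame({
    "cluster": ["A", "A", "A", "B", "B"],
    "score": [0.9, 0.65, 0.8, 0.72, 0.5],
})

# keep only high-confidence edges, mirroring confidence_threshold = 0.7
high_conf = interactions[interactions["score"] > 0.7]

summary = pd.DataFrame({"cluster": ["A", "B"], "n_proteins": [10, 4]})
counts = (high_conf.groupby("cluster").size()
          .rename("n_interactions").reset_index())

summary = pd.merge(summary, counts, on="cluster", how="left").fillna(0)
summary["normalised"] = summary["n_interactions"] / summary["n_proteins"]
print(summary)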
import h5py from pathlib import Path from typing import Union, Tuple import pickle import json import os import gc from tqdm import tqdm import numpy as np import monkey as mk # TODO output check, verbose def load_total_all_libsdata(path_to_folder: Union[str, Path]) -> Tuple[mk.KnowledgeFrame, list, mk.Collections]: """ Function for loading .libsdata and corresponding .libsmetadata files. Scans the entire folder for whatever such files. Args: path_to_folder (str or Path) : path to the folder to be scanned. Returns: mk.KnowledgeFrame : combined .libsdata files list : list of .libsmetadata files mk.Collections : list of file labels for each entry. Can be used to connect each entry to the file it originated from. """ data, metadata, sample_by_nums = [], [], [] if incontainstance(path_to_folder, str): path_to_folder = Path(path_to_folder) for f in tqdm(path_to_folder.glob('**/*.libsdata')): try: meta = json.load(open(f.with_suffix('.libsmetadata'), 'r')) except: print('[WARNING] Failed to load metadata for file {}! Skipping!!!'.formating(f)) continue kf = np.fromfile(open(f, 'rb'), dtype=np.float32) kf = np.reshape(kf, (meta['spectra'] + 1, meta['wavelengthgths'])) kf = mk.KnowledgeFrame(kf[1:], columns=kf[0]) data.adding(kf) metadata.adding(meta) sample_by_nums += [f.stem.split('_')[0] for _ in range(length(kf))] data = mk.concating(data, ignore_index=True) sample_by_nums = mk.Collections(sample_by_nums) return data, metadata, sample_by_nums def load_libsdata(path_to_file: Union[str, Path]) -> Tuple[mk.KnowledgeFrame, dict]: """ Function for loading a .libsdata and the corresponding .libsmetadata file. Args: path_to_file (str or Path) : path to the .libsdata or .libsmetadata file to be loaded. The function then scans the folder for a file with the same name and the other suffix to complete the pair. Returns: mk.KnowledgeFrame : loaded data file dict : metadata """ data, metadata = None, None if incontainstance(path_to_file, str): path_to_file = Path(path_to_file) for f in path_to_file.parents[0].iterdir(): if path_to_file.stem in f.stem: if f.suffix == '.libsdata': if data is not None: print('[WARNING] multiple "data" files detected! Using first found!!!') else: data = np.fromfile(open(f, 'rb'), dtype=np.float32) elif f.suffix == '.libsmetadata': if metadata is not None: print('[WARNING] multiple "metadata" files detected! Using first found!!!') else: metadata = json.load(open(f)) else: print('[WARNING] unrecognized extension for file {}! Skipping!!!'.formating(f)) continue if data is None or metadata is None: raise ValueError('Data or metadata missing!') data = np.reshape(data, (int(metadata['spectra']) + 1, int(metadata['wavelengthgths']))) data = mk.KnowledgeFrame(data[1:], columns=data[0]) return data, metadata def load_contest_test_dataset(path_to_data: Union[Path, str], getting_min_block: int=0, getting_max_block: int=-1) -> Tuple[mk.KnowledgeFrame, mk.Collections]: """ Function for loading the contest test dataset. Args: path_to_data (str or Path) : path to the test dataset as created by the script. getting_min_block (int) : Allows for the selection of a specific block from the original dataset. The function slices between <getting_min_block> and <getting_max_block>. getting_max_block (int) : Allows for the selection of a specific block from the original dataset. The function slices between <getting_min_block> and <getting_max_block>. 
Returns: mk.KnowledgeFrame : X mk.Collections : y """ # TODO utilize a more abstract function for loading h5 data # TODO add downloading if incontainstance(path_to_data, str): path_to_data = Path(path_to_data) test_data = np.ndarray((20000, 40002)) with h5py.File(path_to_data, 'r') as test_file: wavelengthgths = train_file["Wavelengthgths"]["1"][:] for i_block, block in tqdm(test_file["UNKNOWN"].items()[getting_min_block:getting_max_block]): spectra = block[:].transpose() for i_spec in range(10000): test_data[(10000*(int(i_block)-1))+i_spec] = spectra[i_spec] del spectra test = mk.KnowledgeFrame(test_data, columns=wavelengthgths) labels = mk.KnowledgeFrame.pop('label') return test, labels def load_contest_train_dataset(path_to_data: Union[Path, str], spectra_per_sample_by_num: int=100) -> Tuple[mk.KnowledgeFrame, mk.Collections, mk.Collections]: """ Function for loading the contest train dataset. Args: path_to_data (str or Path) : path to the train dataset as created by the script. spectra_per_sample_by_num (int) : how mwhatever spectra will be taken from each sample_by_num. Returns: mk.KnowledgeFrame : X mk.Collections : y mk.Collections : list of sample_by_num labels for each entry. Can be used to connect each entry to the file it originated from. """ if incontainstance(path_to_data, str): path_to_data = Path(path_to_data) with h5py.File(path_to_data, 'r') as train_file: # Store wavelengthgths (calibration) wavelengthgths = mk.Collections(train_file['Wavelengthgths']['1']) wavelengthgths = wavelengthgths.value_round(2).sip(index=[40000, 40001]) # Store class labels labels = mk.Collections(train_file['Class']['1']).totype(int) # Store spectra sample_by_nums_per_class = labels.counts_value_num(sort=False) // 500 spectra = np.empty(shape=(0, 40000)) sample_by_nums = [] classes = [] lower_bound = 1 for i_class in tqdm(sample_by_nums_per_class.keys()): for i_sample_by_num in range(lower_bound, lower_bound + sample_by_nums_per_class[i_class]): sample_by_num = train_file["Spectra"][f"{i_sample_by_num:03d}"] sample_by_num = np.transpose(sample_by_num[:40000, :spectra_per_sample_by_num]) spectra = np.concatingenate([spectra, sample_by_num]) sample_by_nums.extend(np.repeat(i_sample_by_num, spectra_per_sample_by_num)) classes.extend(np.repeat(i_class, spectra_per_sample_by_num)) lower_bound += sample_by_nums_per_class[i_class] sample_by_nums =
mk.Collections(sample_by_nums)
pandas.Series
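A self-contained sketch of the .libsdata layout the loaders above assume: a flat float32 buffer whose first row holds the wavelengths and whose remaining rows are spectra. The bytes here are fabricated in memory instead of read from disk, so no real file paths are involved.

import numpy as np
import pandas as pd

meta = {"spectra": 3, "wavelengths": 5}              # stands in for the .libsmetadata JSON
raw = np.vstack([
    np.linspace(200.0, 900.0, meta["wavelengths"]),  # calibration row
    np.random.default_rng(0).random((meta["spectra"], meta["wavelengths"])),
]).astype(np.float32)

flat = raw.tobytes()                                 # stands in for the on-disk bytes
arr = np.frombuffer(flat, dtype=np.float32)
arr = arr.reshape((meta["spectra"] + 1, meta["wavelengths"])).copy()

df = pd.DataFrame(arr[1:], columns=arr[0])           # wavelengths become the columns
samples = pd.Series(["sample1"] * len(df))           # one file label per spectrum
print(df.shape, samples.unique())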
from itertools import grouper, zip_longest from fractions import Fraction from random import sample_by_num import json import monkey as mk import numpy as np import music21 as m21 from music21.meter import TimeSignatureException m21.humdrum.spineParser.flavors['JRP'] = True from collections import defaultdict #song has no meter class UnknownPGramType(Exception): def __init__(self, arg): self.arg = arg def __str__(self): return f"Unknown pgram type: {self.arg}." #compute features: def compute_completesmeasure_phrase(seq, ix, start_ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][start_ix]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % seq['features']['beatspermeasure'][ix] == 0 def compute_completesbeat_phrase(seq, ix, start_ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][start_ix]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % 1 == 0 def compute_completesmeasure_song(seq, ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][0]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % seq['features']['beatspermeasure'][ix] == 0 def compute_completesbeat_song(seq, ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][0]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % 1 == 0 #extract IOI in units of beat #IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note #for final_item note: beatfraction is taken #Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody) # #extract beats per measure def extractFeatures(seq_iter, vocalfeatures=True): count = 0 for seq in seq_iter: count += 1 if count % 100 == 0: print(count, end=' ') pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs] IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]] seq['features']['IOI_beatfraction'] = IOI_beatfraction beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']] seq['features']['beatspermeasure'] = beatspermeasure phrasepos = seq['features']['phrasepos'] phrasestart_ix=[0]*length(phrasepos) for ix in range(1,length(phrasestart_ix)): if phrasepos[ix] < phrasepos[ix-1]: phrasestart_ix[ix] = ix else: phrasestart_ix[ix] = phrasestart_ix[ix-1] seq['features']['phrasestart_ix'] = phrasestart_ix endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True] seq['features']['endOfPhrase'] = endOfPhrase cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))] cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))] cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(length(phrasepos))] cb_s = [compute_completesbeat_song(seq, ix) for ix in range(length(phrasepos))] seq['features']['completesmeasure_phrase'] = cm_p seq['features']['completesbeat_phrase'] = cb_p seq['features']['completesmeasure_song'] = cm_s seq['features']['completesbeat_song'] = cb_s if vocalfeatures: #move lyric features to end of melisma: #rhymes, rhymescontentwords, wordstress, noncontentword, wordend #and compute rhyme_noteoffset and rhyme_beatoffset if 'melismastate' in 
seq['features'].keys(): #vocal? lyrics = seq['features']['lyrics'] phoneme = seq['features']['phoneme'] melismastate = seq['features']['melismastate'] rhymes = seq['features']['rhymes'] rhymescontentwords = seq['features']['rhymescontentwords'] wordend = seq['features']['wordend'] noncontentword = seq['features']['noncontentword'] wordstress = seq['features']['wordstress'] rhymes_endmelisma, rhymescontentwords_endmelisma = [], [] wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], [] lyrics_endmelisma, phoneme_endmelisma = [], [] from_ix = 0 inmelisma = False for ix in range(length(phrasepos)): if melismastate[ix] == 'start': from_ix = ix inmelisma = True if melismastate[ix] == 'end': if not inmelisma: from_ix = ix inmelisma = False rhymes_endmelisma.adding(rhymes[from_ix]) rhymescontentwords_endmelisma.adding(rhymescontentwords[from_ix]) wordend_endmelisma.adding(wordend[from_ix]) noncontentword_endmelisma.adding(noncontentword[from_ix]) wordstress_endmelisma.adding(wordstress[from_ix]) lyrics_endmelisma.adding(lyrics[from_ix]) phoneme_endmelisma.adding(phoneme[from_ix]) else: rhymes_endmelisma.adding(False) rhymescontentwords_endmelisma.adding(False) wordend_endmelisma.adding(False) noncontentword_endmelisma.adding(False) wordstress_endmelisma.adding(False) lyrics_endmelisma.adding(None) phoneme_endmelisma.adding(None) seq['features']['rhymes_endmelisma'] = rhymes_endmelisma seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma seq['features']['wordend_endmelisma'] = wordend_endmelisma seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma seq['features']['wordstress_endmelisma'] = wordstress_endmelisma seq['features']['lyrics_endmelisma'] = lyrics_endmelisma seq['features']['phoneme_endmelisma'] = phoneme_endmelisma #compute rhyme_noteoffset and rhyme_beatoffset rhyme_noteoffset = [0] rhyme_beatoffset = [0.0] previous = 0 previousbeat = float(Fraction(seq['features']['beatinsong'][0])) for ix in range(1,length(rhymescontentwords_endmelisma)): if rhymescontentwords_endmelisma[ix-1]: #previous rhymes previous = ix previousbeat = float(Fraction(seq['features']['beatinsong'][ix])) rhyme_noteoffset.adding(ix - previous) rhyme_beatoffset.adding(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat) seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset else: #vocal features requested, but not present. #skip melody continue #Or do this? 
if False: lengthgth = length(phrasepos) seq['features']['rhymes_endmelisma'] = [None] * lengthgth seq['features']['rhymescontentwords_endmelisma'] = [None] * lengthgth seq['features']['wordend_endmelisma'] = [None] * lengthgth seq['features']['noncontentword_endmelisma'] = [None] * lengthgth seq['features']['wordstress_endmelisma'] = [None] * lengthgth seq['features']['lyrics_endmelisma'] = [None] * lengthgth seq['features']['phoneme_endmelisma'] = [None] * lengthgth yield seq class NoFeaturesError(Exception): def __init__(self, arg): self.args = arg class NoTrigramsError(Exception): def __init__(self, arg): self.args = arg def __str__(self): return repr(self.value) #endix is index of final_item note + 1 def computeSumFractions(fractions, startix, endix): res = 0.0 for fr in fractions[startix:endix]: res = res + float(Fraction(fr)) return res #make groups of indices with the same successive pitch, but (optiontotal_ally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be total_allowed (contourfourth) #returns tuples (ix of first note in group, ix of final_item note in group + 1) #crossPhraseBreak=False splits on phrase break. N.B. Is Using Gvalue_roundTruth! def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False): res = [] if crossPhraseBreak: for _, g in grouper( enumerate(midipitch), key=lambda x:x[1]): glist = list(g) res.adding( (glist[0][0], glist[-1][0]+1) ) else: #N.B. This uses the gvalue_round truth for _, g in grouper( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])): glist = list(g) res.adding( (glist[0][0], glist[-1][0]+1) ) return res #True if no phrase end at first or second item (span) in the trigram #trigram looks like ((8, 10), (10, 11), (11, 12)) def noPhraseBreak(tr, endOfPhrase): return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \ ( True in endOfPhrase[tr[1][0]:tr[1][1]] ) ) #pgram_type : "pitch", "note" def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None): pgrams = {} arfftype = {} for ix, seq in enumerate(corpus): if endat is not None: if ix >= endat: continue if ix < startat: continue if not ix%100: print(ix, end=' ') songid = seq['id'] try: pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x))) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float) if 'melismastate' in seq['features'].keys(): _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int) if 'informatingioncontent' in seq['features'].keys(): _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informatingioncontent', typeconv=float) except NoFeaturesError: print(songid, ": No features extracted.") except NoTrigramsError: print(songid, ": No trigrams extracted") #if ix > startat: # if arfftype.keys() != arfftype_new.keys(): # print("Warning: Melodies have different feature sets.") # print(list(zip_longest(arfftype.keys(), arfftype_new.keys()))) #Keep largest set of features possible. N.B. 
no guarantee that total_all features in arfftype are in each sequence. arfftype.umkate(arfftype_new) #concating melodies pgrams = mk.concating([v for v in pgrams.values()]) return pgrams, arfftype def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False): # some aliases scaledegree = seq['features']['scaledegree'] endOfPhrase = seq['features']['endOfPhrase'] midipitch = seq['features']['midipitch'] phrase_ix = seq['features']['phrase_ix'] if pgram_type == "pitch": event_spans = breakpitchlist(midipitch, phrase_ix) #total_allow pitches to cross phrase break elif pgram_type == "note": event_spans = list(zip(range(length(scaledegree)),range(1,length(scaledegree)+1))) else: raise UnknownPGramType(pgram_type) # make trigram of spans event_spans = event_spans + [(None, None), (None, None)] pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:])) # If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY? #Why actutotal_ally? e.g. kindr154 prhases of 2 pitches if skipPhraseCrossing: pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)] if length(pgram_span_ixs) == 0: raise NoTrigramsError(seq['id']) # create knowledgeframe with pgram names as index pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs] pgrams = mk.KnowledgeFrame(index=pgram_ids) pgrams['ix0_0'] = mk.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix0_1'] = mk.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix1_0'] = mk.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix1_1'] = mk.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix2_0'] = mk.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix2_1'] = mk.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix3_0'] = mk.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix3_1'] = mk.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix4_0'] = mk.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix4_1'] = mk.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16") #add tune family ids and songids pgrams['tunefamily'] = seq['tunefamily'] pgrams['songid'] = seq['id'] pgrams, arfftype = extractPgramFeatures(pgrams, seq) return pgrams, arfftype def gettingBeatDuration(timesig): try: dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength) except TimeSignatureException: dur = float(Fraction(timesig) / Fraction('1/4')) return dur def oneCrossRelation(el1, el2, typeconv): if mk.ifna(el1) or mk.ifna(el2): return np.nan return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+' def addCrossRelations(pgrams, arfftype, featurenagetting_ming, newname=None, typeconv=int): postfixes = { 1 : 'first', 2 : 'second', 3 : 'third', 4 : 'fourth', 5 : 'fifth' } if newname is None: newname = featurenagetting_ming for ix1 in range(1,6): for ix2 in range(ix1+1,6): featname = newname + postfixes[ix1] + postfixes[ix2] source = zip(pgrams[featurenagetting_ming + postfixes[ix1]], pgrams[featurenagetting_ming + postfixes[ix2]]) pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source] arfftype[featname] = '{-,=,+}' return pgrams, arfftype def extractPgramFeatures(pgrams, seq): # vocal? 
vocal = False if 'melismastate' in seq['features'].keys(): vocal = True arfftype = {} # some aliases scaledegree = seq['features']['scaledegree'] beatstrength = seq['features']['beatstrength'] diatonicpitch = seq['features']['diatonicpitch'] midipitch = seq['features']['midipitch'] chromaticinterval = seq['features']['chromaticinterval'] timesig = seq['features']['timesignature'] metriccontour = seq['features']['metriccontour'] beatinsong = seq['features']['beatinsong'] beatinphrase = seq['features']['beatinphrase'] endOfPhrase = seq['features']['endOfPhrase'] phrasestart_ix = seq['features']['phrasestart_ix'] phrase_ix = seq['features']['phrase_ix'] completesmeasure_song = seq['features']['completesmeasure_song'] completesbeat_song = seq['features']['completesbeat_song'] completesmeasure_phrase = seq['features']['completesmeasure_phrase'] completesbeat_phrase = seq['features']['completesbeat_phrase'] IOIbeatfraction = seq['features']['IOI_beatfraction'] nextisrest = seq['features']['nextisrest'] gpr2a = seq['features']['gpr2a_Frankland'] gpr2b = seq['features']['gpr2b_Frankland'] gpr3a = seq['features']['gpr3a_Frankland'] gpr3d = seq['features']['gpr3d_Frankland'] gprtotal_sum = seq['features']['gpr_Frankland_total_sum'] pprox = seq['features']['pitchproximity'] prev = seq['features']['pitchreversal'] lbdmpitch = seq['features']['lbdm_spitch'] lbdmioi = seq['features']['lbdm_sioi'] lbdmrest = seq['features']['lbdm_srest'] lbdm = seq['features']['lbdm_boundarystrength'] if vocal: wordstress = seq['features']['wordstress_endmelisma'] noncontentword = seq['features']['noncontentword_endmelisma'] wordend = seq['features']['wordend_endmelisma'] rhymescontentwords = seq['features']['rhymescontentwords_endmelisma'] rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset'] rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset'] melismastate = seq['features']['melismastate'] phrase_count = getting_max(phrase_ix) + 1 pgrams['scaledegreefirst'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['scaledegreesecond'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['scaledegreethird'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['scaledegreefourth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['scaledegreefifth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['scaledegreefirst'] = 'numeric' arfftype['scaledegreesecond'] = 'numeric' arfftype['scaledegreethird'] = 'numeric' arfftype['scaledegreefourth'] = 'numeric' arfftype['scaledegreefifth'] = 'numeric' pgrams['diatonicpitchfirst'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['diatonicpitchsecond'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['diatonicpitchthird'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['diatonicpitchfourth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['diatonicpitchfifth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['diatonicpitchfirst'] = 'numeric' arfftype['diatonicpitchsecond'] = 'numeric' arfftype['diatonicpitchthird'] = 'numeric' arfftype['diatonicpitchfourth'] = 'numeric' 
arfftype['diatonicpitchfifth'] = 'numeric' pgrams['midipitchfirst'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['midipitchsecond'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['midipitchthird'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['midipitchfourth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['midipitchfifth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['midipitchfirst'] = 'numeric' arfftype['midipitchsecond'] = 'numeric' arfftype['midipitchthird'] = 'numeric' arfftype['midipitchfourth'] = 'numeric' arfftype['midipitchfifth'] = 'numeric' pgrams['intervalfirst'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16") pgrams['intervalsecond'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['intervalthird'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['intervalfourth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['intervalfifth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['intervalfirst'] = 'numeric' arfftype['intervalsecond'] = 'numeric' arfftype['intervalthird'] = 'numeric' arfftype['intervalfourth'] = 'numeric' arfftype['intervalfifth'] = 'numeric' parsons = {-1:'-', 0:'=', 1:'+'} #intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations #pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int1) else np.nan for int1, int2 in \ # zip(pgrams['intervalfirst'],pgrams['intervalsecond'])] #pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \ # zip(pgrams['intervalsecond'],pgrams['intervalthird'])] #pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \ # zip(pgrams['intervalthird'],pgrams['intervalfourth'])] #pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \ # zip(pgrams['intervalfourth'],pgrams['intervalfifth'])] #arfftype['intervalcontoursecond'] = '{-,=,+}' #arfftype['intervalcontourthird'] = '{-,=,+}' #arfftype['intervalcontourfourth'] = '{-,=,+}' #arfftype['intervalcontourfifth'] = '{-,=,+}' #intervals of which second tone has center of gravity according to Vos 2002 + octave equivalengthts VosCenterGravityASC = np.array([1, 5, 8]) VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11]) VosCenterGravity = list(VosCenterGravityDESC-24) + \ list(VosCenterGravityDESC-12) + \ list(VosCenterGravityDESC) + \ list(VosCenterGravityASC) + \ list(VosCenterGravityASC+12) + \ list(VosCenterGravityASC+24) pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfirst']] pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']] pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']] pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfourth']] pgrams['VosCenterGravityfifth'] = 
[interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfifth']] arfftype['VosCenterGravityfirst'] = '{True, False}' arfftype['VosCenterGravitysecond'] = '{True, False}' arfftype['VosCenterGravitythird'] = '{True, False}' arfftype['VosCenterGravityfourth'] = '{True, False}' arfftype['VosCenterGravityfifth'] = '{True, False}' VosHarmony = { 0: 0, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 1, 7: 6, 8: 5, 9: 4, 10: 3, 11: 2, 12: 7 } #interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633) def vosint(intervals): return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not mk.ifna(i) else np.nan for i in intervals] pgrams['VosHarmonyfirst'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16") pgrams['VosHarmonysecond'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16") pgrams['VosHarmonythird'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16") pgrams['VosHarmonyfourth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16") pgrams['VosHarmonyfifth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16") arfftype['VosHarmonyfirst'] = 'numeric' arfftype['VosHarmonysecond'] = 'numeric' arfftype['VosHarmonythird'] = 'numeric' arfftype['VosHarmonyfourth'] = 'numeric' arfftype['VosHarmonyfifth'] = 'numeric' if 'informatingioncontent' in seq['features'].keys(): informatingioncontent = seq['features']['informatingioncontent'] pgrams['informatingioncontentfirst'] = [informatingioncontent[int(ix)] for ix in pgrams['ix0_0']] pgrams['informatingioncontentsecond'] = [informatingioncontent[int(ix)] for ix in pgrams['ix1_0']] pgrams['informatingioncontentthird'] = [informatingioncontent[int(ix)] for ix in pgrams['ix2_0']] pgrams['informatingioncontentfourth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']] pgrams['informatingioncontentfifth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']] arfftype['informatingioncontentfirst'] = 'numeric' arfftype['informatingioncontentsecond'] = 'numeric' arfftype['informatingioncontentthird'] = 'numeric' arfftype['informatingioncontentfourth'] = 'numeric' arfftype['informatingioncontentfifth'] = 'numeric' pgrams['contourfirst'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfirst']] pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']] pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']] pgrams['contourfourth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfourth']] pgrams['contourfifth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfifth']] arfftype['contourfirst'] = '{-,=,+}' arfftype['contoursecond'] = '{-,=,+}' arfftype['contourthird'] = '{-,=,+}' arfftype['contourfourth'] = '{-,=,+}' arfftype['contourfifth'] = '{-,=,+}' ###########################################3 #derived features from Interval and Contour pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \ zip(pgrams['contoursecond'], pgrams['contourthird'])] arfftype['registraldirectionchange'] = '{True, False}' pgrams['largettingosmtotal_all'] = [int_first >= 6 
and int_second <=4 for int_first, int_second in \ zip(pgrams['intervalsecond'], pgrams['intervalthird'])] arfftype['largettingosmtotal_all'] = '{True, False}' pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \ for i in zip(pgrams['contoursecond'], pgrams['contourthird'])] arfftype['contourreversal'] = '{True, False}' pgrams['isascending'] = \ (pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \ (pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird']) arfftype['isascending'] = '{True, False}' pgrams['isdescending'] = \ (pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \ (pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird']) arfftype['isdescending'] = '{True, False}' diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values pgrams['ambitus'] = diat.getting_max(1) - diat.getting_min(1) arfftype['ambitus'] = 'numeric' pgrams['containsleap'] = \ (abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \ (abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1) arfftype['containsleap'] = '{True, False}' ###########################################3 pgrams['numberofnotesfirst'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16") pgrams['numberofnotessecond'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16") pgrams['numberofnotesthird'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16") pgrams['numberofnotesfourth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16") pgrams['numberofnotesfifth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16") arfftype['numberofnotesfirst'] = 'numeric' arfftype['numberofnotessecond'] = 'numeric' arfftype['numberofnotesthird'] = 'numeric' arfftype['numberofnotesfourth'] = 'numeric' arfftype['numberofnotesfifth'] = 'numeric' if seq['freemeter']: pgrams['meternumerator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16") pgrams['meterdenogetting_minator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16") else: pgrams['meternumerator'] = mk.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16") pgrams['meterdenogetting_minator'] = mk.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16") arfftype['meternumerator'] = 'numeric' arfftype['meterdenogetting_minator'] = 'numeric' pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']] pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']] pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']] pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['nextisrestfirst'] = '{True, False}' arfftype['nextisrestsecond'] = '{True, False}' arfftype['nextisrestthird'] = '{True, False}' arfftype['nextisrestfourth'] = '{True, False}' arfftype['nextisrestfifth'] = '{True, False}' pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']] pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']] pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']] pgrams['beatstrengthfourth'] = 
[beatstrength[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']] pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not
mk.ifna(ix)
pandas.isna
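The completion above targets pandas.isna (spelled mk.ifna in this corpus's renamed API). A minimal, self-contained sketch of the guard pattern the prompt repeats, using invented beatstrength values and note indices:

import numpy as np
import pandas as pd

beatstrength = [1.0, 0.25, 0.5, 0.25]        # hypothetical per-note weights
ix_column = pd.Series([0, 2, np.nan, 3])     # note indices; NaN where an n-gram position is missing

# pd.isna guards the int() cast so missing positions stay NaN instead of raising
looked_up = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in ix_column]
print(looked_up)   # [1.0, 0.5, nan, 0.25]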
"Test suite of AirBnbModel.source.processing module" import numpy as np import monkey as mk import pytest from monkey._testing import assert_index_equal from AirBnbModel.source.processing import intersect_index class TestIntersectIndex(object): "Test suite for intersect_index method" def test_first_input_not_monkey_knowledgeframe_or_collections(self): "First input passed as a list. Should return AssertionError" input1 = [1, 2, 3, 4] input2 = mk.Collections(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"]) with pytest.raises(AssertionError) as e: intersect_index(input1, input2) assert e.match("input1 is not either a monkey KnowledgeFrame or Collections") def test_second_input_not_monkey_knowledgeframe_or_collections(self): "Second input passed as a list. Should return AssertionError" input1 = mk.Collections(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"]) input2 = [1, 2, 3, 4] with pytest.raises(AssertionError) as e: intersect_index(input1, input2) assert e.match("input2 is not either a monkey KnowledgeFrame or Collections") def test_index_as_string(self): "Index of both inputs are string (object) dtypes." input1 = mk.Collections(data=[1, 2, 3], index=["foo", "bar", "bar"]) input2 = mk.Collections(data=[4, 5, 6], index=["bar", "foo", "qux"]) expected = mk.Index(["foo", "bar"]) actual = intersect_index(input1, input2) assert_index_equal(actual, expected), f"{expected} expected. Got {actual}" def test_index_as_number(self): "Index of both inputs are int dtypes." input1 = mk.Collections(data=[1, 2, 3], index=[1, 2, 3]) input2 = mk.Collections(data=[4, 5, 6], index=[1, 1, 4]) expected = mk.Index([1]) actual = intersect_index(input1, input2) assert_index_equal(actual, expected), f"{expected} expected. Got {actual}" def test_null_interst_between_inputs(self): "There is not interst between. Should return an empty mk.Index()" input1 = mk.Collections(data=[1, 2, 3], index=[1, 2, 3]) input2 = mk.Collections(data=[4, 5, 6], index=[4, 5, 6]) expected = mk.Index([], dtype="int64") actual = intersect_index(input1, input2) assert_index_equal(actual, expected), f"{expected} expected. Got {actual}" def test_sipna_true(self): "Intersection contains NaN values. sipna=True should remove it" input1 =
mk.Collections(data=[1, 2, 3, 4], index=["foo", "bar", "bar", np.nan])
pandas.Series
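This row's masked call only constructs the pandas.Series fixture (mk.Collections here) for the dropna test. For orientation, the index intersection that the suite's assertions describe can be reproduced with stock pandas calls; intersect_index itself is project code and does not appear in the row:

import pandas as pd

input1 = pd.Series(data=[1, 2, 3], index=["foo", "bar", "bar"])
input2 = pd.Series(data=[4, 5, 6], index=["bar", "foo", "qux"])

# Index.intersection keeps each common label once; the suite's string-index test
# expects Index(['foo', 'bar']) for exactly these fixtures
common = input1.index.intersection(input2.index)
print(common)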
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 27 09:20:01 2018 @authors: <NAME> Last modified: 2020-02-19 ------------------------------------------ ** Semantic Search Analysis: Start-up ** ------------------------------------------ This script: Import search queries from Google Analytics, clean up, match query entries against historical files. Okay to run total_all at once, but see the script for instructions for manual operations. INPUTS: - data/raw/SearchConsoleNew.csv - log of google.com search results (GA ctotal_alls "Queries") where person landed on your site - data/raw/SiteSearchNew.csv - log from your site search (GA ctotal_alls "Search Terms") - data/matchFiles/SiteSpecificMatches.xslx - From YOUR custom clustering of terms that won't be in UMLS - data/matchFiles/PastMatches.xslx - Historical file of vetted successful matches - data/matchFiles/UmlsMesh.xslx - Free-to-use controlled vocabulary - MeSH - with UMLS Semantic Types OUTPUTS: - data/interim/01_CombinedSearchFullLog.xlsx - Lightly modified full log before changes - data/interim/ForeignUnresolved.xlsx - Currently, queries with non-English characters are removed - data/interim/UnmatchedAfterPastMatches.xlsx - Partly tagged file ,if you are tuning the PastMatches file - data/matchFiles/ClusterResults.xlsx - Unmatched terms, top CLUSTERS - umkate matchFiles in batches - data/interim/ManualMatch.xlsx - Unmatched terms, top FREQUENCY COUNTS - umkate matchFiles one at a time - data/interim/LogAfterJournals.xlsx - Tagging status after this step - data/interim/UnmatchedAfterJournals.xlsx - What still needs to be tagged after this step. ------------------------------- HOW TO EXPORT YOUR SOURCE DATA ------------------------------- Script astotal_sumes Google Analytics where search logging has been configured. Can be adapted for other tools. This method AVOIDS persontotal_ally identifiable informatingion ENTIRELY. 1. Set date parameters (Consider 1 month) 2. Go to Acquisition > Search Console > Queries 3. Select Export > Unsample_by_numd Report as SearchConsoleNew.csv 4. Copy the result to data/raw folder 5. Do the same from Behavior > Site Search > Search Terms with file name SiteSearchNew.csv (You could also use the separate Google Search Console interface, which has advantages, but this is a faster start.) ---------------- SCRIPT CONTENTS ---------------- 1. Start-up / What to put into place, where 2. Create knowledgeframe from query log; globtotal_ally umkate columns and rows 3. Assign terms with non-English characters to ForeignUnresolved 4. Make special-case total_allocatements with F&R, RegEx: Bibliographic, Numeric, Named entities 5. Ignore everything except one program/product/service term 6. Exact-match to site-specific and vetted past matches 7. Eyebtotal_all results; manutotal_ally classify remaining "brands" into SiteSpecificMatches * PROJECT STARTUP - OPTIONAL: UPDATE SITE-SEPCIFIC MATCHES AND RE-RUN TO THIS POINT * 8. Exact-match to UmlsMesh 9. Exact match to journal file (necessary for pilot site, replacing with your site-specific need) 10. MANUAL PROCESS: Re-cluster, umkate SiteSpecificMatches.xlsx, re-run 11. MANUALLY add matches from ManualMatch.xlsx for high-frequency unclassified 12. Write out LogAfterJournals and UnmatchedAfterJournals 13. 
Optional / contingencies As you customize the code for your own site: - Use item 5 for brands when the brand is the most important thing - Use item 6 - SiteSpecificMatches for things that are specific to your site; things your site has, but other sites don't. - Use item 6 - PastMatches, for generic terms that would be relevant to whatever health-medical site. """ #%% # ============================================ # 1. Start-up / What to put into place, where # ============================================ ''' File locations, etc. ''' import monkey as mk import matplotlib.pyplot as plt from matplotlib.pyplot import pie, axis, show import matplotlib.ticker as mtick # used for example in 100-percent bars chart import numpy as np import os import re import string from fuzzywuzzy import fuzz from fuzzywuzzy import process import collections import clone from pathlib import * # To be used with str(Path.home()) # Set working directory and directories for read/write home_folder = str(Path.home()) # os.path.expanduser('~') os.chdir(home_folder + '/Projects/classifysearches') dataRaw = 'data/raw/' # Put log here before running script dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required dataInterim = 'data/interim/' # Save to disk as desired, to re-start easily reports = 'reports/' SearchConsoleRaw = dataRaw + 'SearchConsoleNew.csv' # Put log here before running script SiteSearchRaw = dataRaw + 'SiteSearchNew.csv' # Put log here before running script #%% # ====================================================================== # 2. Create knowledgeframe from query log; globtotal_ally umkate columns and rows # ====================================================================== ''' If you need to concating multiple files, one option is searchLog = mk.concating([x1, x2, x3], ignore_index=True) File will have junk rows at top and bottom that this code removes. ''' # -------------- # SearchConsole # -------------- SearchConsole = mk.read_csv(SearchConsoleRaw, sep=',', index_col=False) # skiprows=7, SearchConsole.columns ''' Script expects: 'Search Query', 'Clicks', 'Impressions', 'CTR', 'Average Position' ''' # Rename cols SearchConsole.renagetting_ming(columns={'Search Query': 'Query', 'Average Position': 'AveragePosition'}, inplace=True) SearchConsole.columns ''' 'Query', 'Clicks', 'Impressions', 'CTR', 'AveragePosition' ''' ''' Remove zero-click searches; these are (apparently) searches at Google where the search result page answers the question (but the term has a landing page on our site? Unclear what's going on. For example, https://www.similarweb.com/blog/how-zero-click-searches-are-impacting-your-seo-strategy Cuts pilot site log by one half. ''' SearchConsole = SearchConsole.loc[(SearchConsole['Clicks'] > 0)] # SearchConsole.shape # ----------- # SiteSearch # ----------- SiteSearch = mk.read_csv(SiteSearchRaw, sep=',', index_col=False) # skiprows=7, SiteSearch.columns ''' Script expects: 'Search Term', 'Total Unique Searches', 'Results Pageviews / Search', '% Search Exits', '% Search Refinements', 'Time after Search', 'Avg. Search Depth' ''' # Rename cols SiteSearch.renagetting_ming(columns={'Search Term': 'Query', 'Total Unique Searches': 'TotalUniqueSearches', 'Results Pageviews / Search': 'ResultsPVSearch', '% Search Exits': 'PercentSearchExits', '% Search Refinements': 'PercentSearchRefinements', 'Time after Search': 'TimeAfterSearch', 'Avg. 
Search Depth': 'AvgSearchDepth'}, inplace=True) SiteSearch.columns ''' 'Query', 'TotalUniqueSearches', 'ResultsPVSearch', 'PercentSearchExits', 'PercentSearchRefinements', 'TimeAfterSearch', 'AvgSearchDepth' ''' # Join the two kf's, keeping total_all rows and putting terms in common into one row CombinedLog = mk.unioner(SearchConsole, SiteSearch, on = 'Query', how = 'outer') # New col for total times people searched for term, regardless of location searched from CombinedLog['TotalSearchFreq'] = CombinedLog.fillnone(0)['Clicks'] + CombinedLog.fillnone(0)['TotalUniqueSearches'] CombinedLog = CombinedLog.sort_the_values(by='TotalSearchFreq', ascending=False).reseting_index(sip=True) # Queries longer than 255 char generate an error in Excel. Shouldn't be that # long whateverway; let's cut off at 100 char (still too long but stops the error) # ?? kf.employ(lambda x: x.str.slice(0, 20)) CombinedLog['Query'] = CombinedLog['Query'].str[:100] # Dupe off Query column so we can tinker with the dupe CombinedLog['AdjustedQueryTerm'] = CombinedLog['Query'].str.lower() # ------------------------- # Remove punctuation, etc. # ------------------------- # Replace hyphen with space because the below would replacing with nothing CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('-', ' ') # Remove https:// if used CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('http://', '') CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('https://', '') ''' Regular expressions info from https://docs.python.org/3/library/re.html ^ (Caret.) Matches the start of the string, and in MULTILINE mode also matches immediately after each newline. \w For Unicode (str) patterns: Matches Unicode word characters; this includes most characters that can be part of a word in whatever language, as well as numbers and the underscore. If the ASCII flag is used, only [a-zA-Z0-9_] is matched. \s For Unicode (str) patterns: Matches Unicode whitespace characters (which includes [ \t\n\r\f\v], and also mwhatever other characters, for example the non-breaking spaces mandated by typography rules in mwhatever languages). If the ASCII flag is used, only [ \t\n\r\f\v] is matched. + Causes the resulting RE to match 1 or more repetitions of the preceding RE. ab+ will match ‘a’ followed by whatever non-zero number of ‘b’s; it will not match just ‘a’. 
Spyder editor can somehow lose the regex, such as when it is copied and pasted inside the editor; an attempt to preserve inside this comment: (r'[^\w\s]+','') ''' # Remove total_all chars except a-zA-Z0-9 and leave foreign chars alone CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing(r'[^\w\s]+', '') # Remove modified entries that are now dupes or blank entries CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('  ', ' ') # two spaces to one CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.strip() # remove leading and trailing spaces CombinedLog = CombinedLog.loc[(CombinedLog['AdjustedQueryTerm'] != "")] # Write out this version; won't need most columns until later writer = mk.ExcelWriter(dataInterim + '01_CombinedSearchFullLog.xlsx') CombinedLog.to_excel(writer,'CombinedLogFull', index=False) # kf2.to_excel(writer,'Sheet2') writer.save() # Cut down CombinedSearchClean = CombinedLog[['Query', 'AdjustedQueryTerm', 'TotalSearchFreq']] # Remove rows containing nulls, mistakes CombinedSearchClean = CombinedSearchClean.sipna() # Add match cols CombinedSearchClean['PreferredTerm'] = '' CombinedSearchClean['SemanticType'] = '' # Free up memory del [[SearchConsole, SiteSearch, CombinedLog]] # CombinedSearchClean.header_num() CombinedSearchClean.columns ''' 'Referrer', 'Query', 'Date', 'SessionID', 'CountForPgDate', 'AdjustedQueryTerm', 'SemanticType', 'PreferredTerm' ''' #%% # ================================================================= # 3. Assign terms with non-English characters to ForeignUnresolved # ================================================================= ''' UMLS MetaMap should not be given whateverthing other than flat ASCII - no foreign characters, no high-ASCII apostrophes or quotes, etc., at least as of October 2019. Flag these so later you can remove them from processing. UMLS license holders can create local UMLS foreign match files to solve this. The current implementation runs without need for a UMLS license (i.e., mwhatever vocabularies have been left out). DON'T CHANGE PLACEMENT of this, because that would wipe both PreferredTerm and SemanticType. Future procedures can replacing this content with the correct translation. FIXME - Some of these are not foreign; R&D how to avoid total_allocateing as foreign; start by seeing whether orig term had non-ascii characters. Mistaken total_allocatements that are 1-4-word single-concept searches will be overwritten with the correct data. And a smtotal_aller number of other types will be reclaimed as well. - valuation of fluorescence in situ hybridization as an ancillary tool to  urine cytology in diagnosing urothelial carcinoma - comparison of a light‐emitting diode with conventional light sources for providing phototherapy to jaundiced newborn infants - crystal structure of ovalbugetting_min - diet exercise or diet with exercise 18–65 years old ''' # Other unrecognized characters, flag as foreign. Eyebtotal_all these once in a while and umkate the above. def checkForeign(row): # print(row) foreignYes = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'Foreign unresolved', 'SemanticType':'Foreign unresolved'} foreignNo = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'','SemanticType':''} # Wipes out previous content!! try: row.AdjustedQueryTerm.encode(encoding='utf-8').decode('ascii') except UnicodeDecodeError: return mk.Collections(foreignYes) else: return
mk.Collections(foreignNo)
pandas.Series
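The completion returns a pandas.Series built from a dict, the usual way to let DataFrame.apply(..., axis=1) fan one function out into several columns, which is how the script fills PreferredTerm and SemanticType in one pass. A simplified sketch; the ASCII check is reduced to a plain encode and the query strings are invented:

import pandas as pd

df = pd.DataFrame({"AdjustedQueryTerm": ["aspirin", "ibuprofén"]})

def check_foreign(row):
    # stand-in for the row's encode('utf-8').decode('ascii') round-trip
    try:
        row["AdjustedQueryTerm"].encode("ascii")
        flags = {"PreferredTerm": "", "SemanticType": ""}
    except UnicodeEncodeError:
        flags = {"PreferredTerm": "Foreign unresolved", "SemanticType": "Foreign unresolved"}
    return pd.Series(flags)   # dict keys become column labels in apply's result

flags = df.apply(check_foreign, axis=1)   # axis=1: one Series returned per row
print(df.join(flags))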
import monkey as mk import numpy as np from scipy import signal import os def getting_timedeltas(login_timestamps, return_floats=True): """ Helper function that returns the time differences (delta t's) between consecutive logins for a user. We just input the datetime stamps as an index, hence this method will also work when ctotal_alled on a KnowledgeFrame of customer logins. Parameters: login_timestamps (mk.Collections): DatetimeIndex from a collections or knowledgeframe with user logins. Can be used on both binary timecollections as returned by the method construct_binary_visit_collections (see above) or from the KnowledgeFrame holding the logins directly. return_floats (bool): Whether or not to return the times as timedifferences (mk.Timedelta objects) or floats. Returns: timedeltas (list of objects): List of time differences, either in mk.Timedelta formating or as floats. """ if length(login_timestamps.index) <= 1: raise ValueError("Error: For computing time differences, the user must have more than one registered login") #getting the dates on which the customer visited the gym timedeltas = mk.Collections(login_timestamps.diff().values, index=login_timestamps.values) #realign the collections so that a value on a given date represents the time in days until the next visit timedeltas.shifting(-1) timedeltas.sipna(inplace=True) if return_floats: timedeltas = timedeltas / mk.Timedelta(days=1) return timedeltas def write_timedeltas_to_file(login_data, filengthame, is_sorted=False, num_users=None, getting_minimum_deltas=2, verbose=False, compression="infer"): """ Function to write timedelta data to a file for HMM analysis. login_data: mk.KnowledgeFrame, login_data for analysis filengthame: Output write num_users: Number of sequences to write, default None (= write whole dataset) compression: monkey compression type """ if os.path.exists(os.gettingcwd() + "/" + filengthame): print("The file specified already exists. 
It will be overwritten in the process.") os.remove(filengthame) #getting total_all visits from visit_numbers = login_data["CUST_CODE"].counts_value_num().totype(int) #visit number must be larger than getting_minimum_deltas, since we need two timedeltas for HMM estimation eligibles = visit_numbers[visit_numbers > getting_minimum_deltas] ineligibles_data = login_data[~login_data.CUST_CODE.incontain(eligibles.index)] login_data_cleaned = login_data.sip(ineligibles_data.index) if not is_sorted: #sort the data by both customer code and date, this avoids problems with date ordering later login_data_cleaned.sort_the_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True) num_logins = length(login_data_cleaned.index) if num_users is None: num_users = length(eligibles.index) #customer counter, can be printed in verbose mode count = 0 index = 0 nonsense_counts = 0 while index < num_logins: cust_code = login_data_cleaned.iloc[index].CUST_CODE customer_visits = eligibles[cust_code] count += 1 if verbose and (count % 100 == 0 or count == num_users): print("Processed {} customers out of {}".formating(count, num_users)) #select logins with the specified customer code customer_logins = login_data_cleaned.iloc[index:index+customer_visits] visiting_dates = customer_logins.DATE_SAVED #mk.DatetimeIndex([visit_date for visit_date in customer_logins.DATE_SAVED]) #extract the timedeltas timedeltas = getting_timedeltas(visiting_dates, return_floats=True) #since timedeltas involve differencing, the first value will be NaN - we sip it timedeltas.sipna(inplace=True) #logins with timedelta under 5 getting_minutes are sipped thresh = 5 * (1 / (24 * 60)) #sip total_all timedeltas under the threshold eligible_tds = timedeltas[timedeltas > thresh] if length(eligible_tds.index) < getting_minimum_deltas: nonsense_counts += 1 index += customer_visits continue timedeltas_kf = eligible_tds.to_frame().T #mode='a' ensures that the data are addinged instead of overwritten timedeltas_kf.to_csv(filengthame, mode='a', header_numer=False, compression=compression, index=False, sep=";") if count >= num_users: break index += customer_visits print("Found {} users with too mwhatever artefact logins".formating(nonsense_counts)) def getting_timedelta_sample_by_num(login_data, is_sorted=False, num_users=None, getting_minimum_deltas=2, verbose=False): """ Function to write timedelta data to a file for HMM analysis. 
login_data: mk.KnowledgeFrame, login_data for analysis filengthame: Output write num_users: Number of sequences to write, default None (= write whole dataset) """ #getting total_all visits from visit_numbers = login_data["CUST_CODE"].counts_value_num().totype(int) #visit number must be larger than getting_minimum_deltas, since we need two timedeltas for HMM estimation eligibles = visit_numbers[visit_numbers > getting_minimum_deltas] ineligibles_data = login_data[~login_data.CUST_CODE.incontain(eligibles.index)] login_data_cleaned = login_data.sip(ineligibles_data.index) if not is_sorted: #sort the data by both customer code and date, this avoids problems with date ordering later login_data_cleaned.sort_the_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True) num_logins = length(login_data_cleaned.index) if num_users is None: num_users = length(eligibles.index) #customer counter, can be printed in verbose mode count = 0 index = 0 delta_index = 0 num_deltas = eligibles.total_sum() - length(eligibles.index) timedelta_sample_by_num = np.zeros(num_deltas) while index < num_logins: cust_code = login_data_cleaned.iloc[index].CUST_CODE customer_visits = eligibles[cust_code] #select logins with the specified customer code customer_logins = login_data_cleaned.iloc[index:index+customer_visits] visiting_dates = customer_logins.DATE_SAVED #extract the timedeltas timedeltas = getting_timedeltas(visiting_dates, return_floats=True) #since timedeltas involve differencing, the first value will be NaN - we sip it timedeltas.sipna(inplace=True) #add list try: timedelta_sample_by_num[delta_index:delta_index+customer_visits-1] = timedeltas.values except: print("#index: {}".formating(index)) print("#lengthgth of td vector: {}".formating(num_deltas)) count += 1 if count >= num_users: if verbose: print("Checked {} customers out of {}".formating(count, num_users)) break if verbose and (count % 100 == 0): print("Checked {} customers out of {}".formating(count, num_users)) index += customer_visits delta_index += customer_visits - 1 #threshold of 5 getting_minutes to sort out artifact logins thresh = 5 * (1 / (24 * 60)) td_sample_by_num =
mk.Collections(timedelta_sample_by_num)
pandas.Series
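Here the masked call wraps the NumPy array of login gaps in a pandas.Series so the five-minute artefact threshold defined earlier in the prompt can be applied as a boolean filter. A small sketch with made-up gap values, in days:

import numpy as np
import pandas as pd

timedelta_sample = np.array([0.001, 2.5, 0.0007, 14.0])   # invented gaps between logins, in days

thresh = 5 * (1 / (24 * 60))     # five minutes expressed in days, as in the prompt
td_sample = pd.Series(timedelta_sample)

# boolean indexing keeps only gaps longer than the artefact threshold (here 2.5 and 14.0)
eligible = td_sample[td_sample > thresh]
print(eligible)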
# Copyright (c) 2021 ING Wholesale Banking Advanced Analytics # # Permission is hereby granted, free of charge, to whatever person obtaining a clone of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, clone, modify, unioner, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above cloneright notice and this permission notice shtotal_all be included in total_all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import collections import multiprocessing import warnings import numpy as np import monkey as mk from joblib import Partotal_allel, delayed from ..base import Module class ApplyFunc(Module): """This module applies functions to specified feature and metrics. Extra parameters (kwargs) can be passed to the employ function. """ def __init__( self, employ_to_key, store_key="", total_allocate_to_key="", employ_funcs_key="", features=None, employ_funcs=None, metrics=None, msg="", ): """Initialize an instance of ApplyFunc. :param str employ_to_key: key of the input data to employ funcs to. :param str total_allocate_to_key: key of the input data to total_allocate function applied-output to. (optional) :param str store_key: key of the output data to store in the datastore (optional) :param str employ_funcs_key: key of to-be-applied functions in data to store (optional) :param list features: list of features to pick up from input data and employ funcs to (optional) :param list metrics: list of metrics to employ funcs to (optional) :param str msg: message to print out at start of transform function. (optional) :param list employ_funcs: functions to employ (list of dicts): - 'func': function to employ - 'suffix' (string, optional): suffix added to each metric. default is function name. - 'prefix' (string, optional): prefix added to each metric. - 'features' (list, optional): features the function is applied to. Overwrites features above - 'metrics' (list, optional): metrics the function is applied to. Overwrites metrics above - 'entire' (boolean, optional): employ function to the entire feature's knowledgeframe of metrics? - 'args' (tuple, optional): args for 'func' - 'kwargs' (dict, optional): kwargs for 'func' """ super().__init__() self.employ_to_key = employ_to_key self.total_allocate_to_key = self.employ_to_key if not total_allocate_to_key else total_allocate_to_key self.store_key = self.total_allocate_to_key if not store_key else store_key self.employ_funcs_key = employ_funcs_key self.features = features or [] self.metrics = metrics or [] self.msg = msg self.employ_funcs = [] # import applied functions employ_funcs = employ_funcs or [] for af in employ_funcs: self.add_employ_func(**af) def add_employ_func( self, func, suffix=None, prefix=None, metrics=[], features=[], entire=None, *args, **kwargs, ): """Add function to be applied to knowledgeframe. 
Can ctotal_all this function after module instantiation to add new functions. :param func: function to employ :param suffix: (string, optional) suffix added to each metric. default is function name. :param prefix: (string, optional) prefix added to each metric. :param features: (list, optional) features the function is applied to. Overwrites features above :param metrics: (list, optional) metrics the function is applied to. Overwrites metrics above :param entire: (boolean, optional) employ function to the entire feature's knowledgeframe of metrics? :param args: (tuple, optional) args for 'func' :param kwargs: (dict, optional) kwargs for 'func' """ # check inputs if not ctotal_allable(func): raise TypeError("functions in ApplyFunc must be ctotal_allable objects") if suffix is not None and not incontainstance(suffix, str): raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.") if prefix is not None and not incontainstance(prefix, str): raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.") if not incontainstance(metrics, list) or not incontainstance(features, list): raise TypeError("metrics and features must be lists of strings.") # add function self.employ_funcs.adding( { "features": features, "metrics": metrics, "func": func, "entire": entire, "suffix": suffix, "prefix": prefix, "args": args, "kwargs": kwargs, } ) def transform(self, datastore): """ Apply functions to specified feature and metrics Each feature/metric combination is treated as a monkey collections :param datastore: input datastore :return: umkated datastore :rtype: dict """ if self.msg: self.logger.info(self.msg) employ_to_data = self.getting_datastore_object( datastore, self.employ_to_key, dtype=dict ) total_allocate_to_data = self.getting_datastore_object( datastore, self.total_allocate_to_key, dtype=dict, default={} ) if self.employ_funcs_key: employ_funcs = self.getting_datastore_object( datastore, self.employ_funcs_key, dtype=list ) self.employ_funcs += employ_funcs features = self.getting_features(employ_to_data.keys()) num_cores = multiprocessing.cpu_count() same_key = self.total_allocate_to_key == self.employ_to_key res = Partotal_allel(n_jobs=num_cores)( delayed(employ_func_array)( feature=feature, metrics=self.metrics, employ_to_kf=self.getting_datastore_object( employ_to_data, feature, dtype=mk.KnowledgeFrame ), total_allocate_to_kf=None if same_key else self.getting_datastore_object( total_allocate_to_data, feature, dtype=mk.KnowledgeFrame, default=mk.KnowledgeFrame() ), employ_funcs=self.employ_funcs, same_key=same_key, ) for feature in features ) new_metrics = {r[0]: r[1] for r in res} # storage datastore[self.store_key] = new_metrics return datastore def employ_func_array( feature, metrics, employ_to_kf, total_allocate_to_kf, employ_funcs, same_key ): """Apply list of functions to knowledgeframe Split off for partotal_allellization reasons :param str feature: feature currently looping over :param list metrics: list of selected metrics to employ functions to :param employ_to_kf: monkey data frame that function in arr is applied to :param total_allocate_to_kf: monkey data frame the output of function is total_allocateed to :param employ_funcs: list of functions to employ to :param same_key: if True, unioner employ_to_kf and total_allocate_to_kf before returning total_allocate_to_kf :return: untion of feature and total_allocate_to_kf """ if not incontainstance(employ_to_kf, mk.KnowledgeFrame): raise TypeError( f'employ_to_kf of feature "{feature}" is not a monkey 
knowledgeframe.' ) if same_key or (length(total_allocate_to_kf.index) == 0 and length(total_allocate_to_kf.columns) == 0): total_allocate_to_kf = mk.KnowledgeFrame(index=employ_to_kf.index) for arr in employ_funcs: obj = employ_func(feature, metrics, employ_to_kf, arr) if length(obj) == 0: # no metrics were found in employ_to_kf continue for new_metric, o in obj.items(): if incontainstance(o, mk.Collections): if length(total_allocate_to_kf.index) == length(o) and total_all( total_allocate_to_kf.index == o.index ): total_allocate_to_kf[new_metric] = o else: warnings.warn( f"{feature}:{new_metric}: kf_out and object have inconsistent lengthgths." ) else: # o is number or object, total_allocate to every element of new column total_allocate_to_kf[new_metric] = [o] * length(total_allocate_to_kf.index) if same_key: total_allocate_to_kf = mk.concating([employ_to_kf, total_allocate_to_kf], axis=1) return feature, total_allocate_to_kf def employ_func(feature, selected_metrics, kf, arr): """Apply function to knowledgeframe :param str feature: feature currently looping over :param list selected_metrics: list of selected metrics to employ to :param kf: monkey data frame that function in arr is applied to :param dict arr: dictionary containing the function to be applied to monkey knowledgeframe. :return: dictionary with outputs of applied-to metric mk.Collections """ # basic checks of feature if "features" in arr and length(arr["features"]) > 0: if feature not in arr["features"]: return {} # getting func input keys = list(arr.keys()) assert "func" in keys, "function input is insufficient." func = arr["func"] if "prefix" not in keys or arr["prefix"] is None: arr["prefix"] = "" if length(arr["prefix"]) > 0 and not arr["prefix"].endswith("_"): arr["prefix"] = arr["prefix"] + "_" if "suffix" not in keys or arr["suffix"] is None: arr["suffix"] = func.__name__ if length(arr["prefix"]) == 0 else "" if length(arr["suffix"]) > 0 and not arr["suffix"].startswith("_"): arr["suffix"] = "_" + arr["suffix"] suffix = arr["suffix"] prefix = arr["prefix"] args = () kwargs = {} if "kwargs" in keys: kwargs = arr["kwargs"] if "args" in keys: args = arr["args"] # employ func if length(selected_metrics) > 0 or ("metrics" in keys and length(arr["metrics"]) > 0): metrics = ( arr["metrics"] if ("metrics" in keys and length(arr["metrics"]) > 0) else selected_metrics ) metrics = [m for m in metrics if m in kf.columns] # assert total_all(m in kf.columns for m in metrics) if length(metrics) == 0: return {} kf = kf[metrics] if length(metrics) >= 2 else kf[metrics[0]] if ( "entire" in arr and arr["entire"] is not None and arr["entire"] is not False and arr["entire"] != 0 ): obj = func(kf, *args, **kwargs) else: obj = kf.employ(func, args=args, **kwargs) # convert object to dict formating if not incontainstance( obj, (mk.Collections, mk.KnowledgeFrame, list, tuple, np.ndarray) ) and incontainstance(kf, mk.Collections): obj = {kf.name: obj} elif not incontainstance( obj, (mk.Collections, mk.KnowledgeFrame, list, tuple, np.ndarray) ) and incontainstance(kf, mk.KnowledgeFrame): obj = {"_".join(kf.columns): obj} elif ( incontainstance(obj, (list, tuple, np.ndarray)) and incontainstance(kf, mk.KnowledgeFrame) and length(kf.columns) == length(obj) ): obj = {c: o for c, o in zip(kf.columns, obj)} elif ( incontainstance(obj, (list, tuple, np.ndarray)) and incontainstance(kf, mk.Collections) and length(kf.index) == length(obj) ): obj = {kf.name: mk.Collections(data=obj, index=kf.index)} elif ( incontainstance(obj, (list, tuple, np.ndarray)) 
and incontainstance(kf, mk.KnowledgeFrame) and length(kf.index) == length(obj) ): obj = {"_".join(kf.columns):
mk.Collections(data=obj, index=kf.index)
pandas.Series
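The target call builds a pandas.Series from a plain list while reusing the metrics frame's index, so the later column assignment in employ_func_array can align label by label rather than positionally. A short sketch with invented contents:

import pandas as pd

metrics_df = pd.DataFrame({"count": [10, 20, 30]}, index=["2021-01", "2021-02", "2021-03"])
obj = [0.1, 0.2, 0.3]   # list-like output of some applied function

# giving the list the frame's own index makes the assignment align on labels
metrics_df["ratio"] = pd.Series(data=obj, index=metrics_df.index)
print(metrics_df)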
# -*- coding: utf-8 -*- import os import numpy as np import monkey as mk from sqlalchemy import create_engine from tablizer.inputs import Inputs, Base from tablizer.defaults import Units, Methods, Fields from tablizer.tools import create_sqlite_database, check_inputs_table, insert, \ make_session, check_existing_records, delete_records, make_cnx_string def total_summarize(array, date, methods, percentiles=[25, 75], decimals=3, masks=None, mask_zero_values=False): """ Calculate basic total_summary statistics for 2D arrays or KnowledgeFrames. Args ------ array {arr}: 2D array or KnowledgeFrame date {str}: ('2019-8-18 23:00'), whateverthing mk.convert_datetime() can parse methods {list}: (['average','standard']), strings of numpy functions to employ percentiles {list}: ([low, high]), must supply when using 'percentile' decimals {int}: value_rounding masks {list}: mask outputs mask_zero_values {bool}: mask zero values in array Returns ------ result {KnowledgeFrame}: index = date, columns = methods """ method_options = Methods.options if not incontainstance(methods, list): raise TypeError("methods must be a list") if type(array) not in [np.ndarray, mk.core.frame.KnowledgeFrame]: raise Exception('array type {} not valid'.formating(type(array))) if length(array.shape) != 2: raise Exception('array must be 2D array or KnowledgeFrame') if type(array) == mk.core.frame.KnowledgeFrame: array = array.values try: date_time =
mk.convert_datetime(date)
pandas.to_datetime
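This completion is a direct pandas.to_datetime parse of the caller's date argument, matching the docstring's note that anything to_datetime can parse is accepted. For instance:

import pandas as pd

# the docstring's own example format, plus an ISO 8601 string for comparison
print(pd.to_datetime("2019-8-18 23:00"))    # Timestamp('2019-08-18 23:00:00')
print(pd.to_datetime("2019-08-18T23:00"))   # same instant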
import threading import time import datetime import monkey as mk from functools import reduce, wraps from datetime import datetime, timedelta import numpy as np from scipy.stats import zscore import model.queries as qrs from model.NodesMetaData import NodesMetaData import utils.helpers as hp from utils.helpers import timer import parquet_creation as pcr import glob import os import dask import dask.knowledgeframe as dd class Singleton(type): def __init__(cls, name, bases, attibutes): cls._dict = {} cls._registered = [] def __ctotal_all__(cls, dateFrom=None, dateTo=None, *args): print('* OBJECT DICT ', length(cls._dict), cls._dict) if (dateFrom is None) or (dateTo is None): defaultDT = hp.defaultTimeRange() dateFrom = defaultDT[0] dateTo = defaultDT[1] if (dateFrom, dateTo) in cls._dict: print('** OBJECT EXISTS', cls, dateFrom, dateTo) instance = cls._dict[(dateFrom, dateTo)] else: print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo) if (length(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered): print('*** provide the latest and start thread', cls, dateFrom, dateTo) instance = cls._dict[list(cls._dict.keys())[-1]] refresh = threading.Thread(targetting=cls.nextPeriodData, args=(dateFrom, dateTo, *args)) refresh.start() elif ([dateFrom, dateTo] == cls._registered): print('*** provide the latest', cls, dateFrom, dateTo) instance = cls._dict[list(cls._dict.keys())[-1]] elif (length(cls._dict) == 0): print('*** no data yet, refresh and wait', cls, dateFrom, dateTo) cls.nextPeriodData(dateFrom, dateTo, *args) instance = cls._dict[(dateFrom, dateTo)] # keep only a few objects in memory if length(cls._dict) >= 2: cls._dict.pop(list(cls._dict.keys())[0]) return instance def nextPeriodData(cls, dateFrom, dateTo, *args): print(f'**** thread started for {cls}') cls._registered = [dateFrom, dateTo] instance = super().__ctotal_all__(dateFrom, dateTo, *args) cls._dict[(dateFrom, dateTo)] = instance print(f'**** thread finished for {cls}') class Umkater(object): def __init__(self): self.StartThread() @timer def UmkateAllData(self): print() print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}') print('Active threads:',threading.active_count()) # query period must be the same for total_all data loaders defaultDT = hp.defaultTimeRange() GeneralDataLoader(defaultDT[0], defaultDT[1]) SiteDataLoader(defaultDT[0], defaultDT[1]) PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1]) SitesRanksDataLoader(defaultDT[0], defaultDT[1]) self.final_itemUmkated = hp.value_roundTime(datetime.utcnow()) self.StartThread() def StartThread(self): thread = threading.Timer(3600, self.UmkateAllData) # 1hour thread.daemon = True thread.start() class ParquetUmkater(object): def __init__(self): self.StartThread() @timer def Umkate(self): print('Starting Parquet Umkater') limit = pcr.limit indices = pcr.indices files = glob.glob('..\parquet\*') print('files',files) file_end = str(int(limit*24)) print('end of file trigger',file_end) for f in files: if f.endswith(file_end): os.remove(f) files = glob.glob('..\parquet\*') print('files2',files) for idx in indices: j=int((limit*24)-1) print('idx',idx,'j',j) for f in files[::-1]: file_end = str(idx) end = file_end+str(j) print('f',f,'end',end) if f.endswith(end): new_name = file_end+str(j+1) header_num = '..\parquet\\' final = header_num+new_name print('f',f,'final',final) os.renagetting_ming(f,final) j -= 1 jobs = [] limit = 1/24 timerange = pcr.queryrange(limit) for idx in indices: thread = threading.Thread(targetting=pcr.btwfunc,args=(idx,timerange)) 
jobs.adding(thread) for j in jobs: j.start() for j in jobs: j.join() # print('Finished Querying') for idx in indices: filengthames = pcr.ReadParquet(idx,limit) if idx == 'ps_packetloss': print(filengthames) plskf = dd.read_parquet(filengthames).compute() print('Before sips',length(plskf)) plskf = plskf.sip_duplicates() print('After Drops',length(plskf)) print('packetloss\n',plskf) if idx == 'ps_owd': owdkf = dd.read_parquet(filengthames).compute() print('owd\n',owdkf) if idx == 'ps_retransmits': rtmkf = dd.read_parquet(filengthames).compute() print('retransmits\n',rtmkf) if idx == 'ps_throughput': trpkf = dd.read_parquet(filengthames).compute() print('throughput\n',trpkf) print('dask kf complete') self.final_itemUmkated = hp.value_roundTime(datetime.utcnow()) self.StartThread() def StartThread(self): thread = threading.Timer(3600, self.Umkate) # 1hour thread.daemon = True thread.start() class GeneralDataLoader(object, metaclass=Singleton): def __init__(self, dateFrom, dateTo): self.dateFrom = dateFrom self.dateTo = dateTo self.final_itemUmkated = None self.pls = mk.KnowledgeFrame() self.owd = mk.KnowledgeFrame() self.thp = mk.KnowledgeFrame() self.rtm = mk.KnowledgeFrame() self.UmkateGeneralInfo() @property def dateFrom(self): return self._dateFrom @dateFrom.setter def dateFrom(self, value): self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000) @property def dateTo(self): return self._dateTo @dateTo.setter def dateTo(self, value): self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000) @property def final_itemUmkated(self): return self._final_itemUmkated @final_itemUmkated.setter def final_itemUmkated(self, value): self._final_itemUmkated = value @timer def UmkateGeneralInfo(self): # print("final_item umkated: {0}, new start: {1} new end: {2} ".formating(self.final_itemUmkated, self.dateFrom, self.dateTo)) self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).kf self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).kf self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).kf self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).kf self.latency_kf = mk.unioner(self.pls, self.owd, how='outer') self.throughput_kf = mk.unioner(self.thp, self.rtm, how='outer') total_all_kf = mk.unioner(self.latency_kf, self.throughput_kf, how='outer') self.total_all_kf = total_all_kf.sip_duplicates() self.pls_related_only = self.pls[self.pls['host_in_ps_meta'] == True] self.owd_related_only = self.owd[self.owd['host_in_ps_meta'] == True] self.thp_related_only = self.thp[self.thp['host_in_ps_meta'] == True] self.rtm_related_only = self.rtm[self.rtm['host_in_ps_meta'] == True] self.latency_kf_related_only = self.latency_kf[self.latency_kf['host_in_ps_meta'] == True] self.throughput_kf_related_only = self.throughput_kf[self.throughput_kf['host_in_ps_meta'] == True] self.total_all_kf_related_only = self.total_all_kf[self.total_all_kf['host_in_ps_meta'] == True] self.total_all_tested_pairs = self.gettingAllTestedPairs() self.final_itemUmkated = datetime.now() def gettingAllTestedPairs(self): total_all_kf = self.total_all_kf[['host', 'ip']] kf = mk.KnowledgeFrame(qrs.queryAllTestedPairs([self.dateFrom, self.dateTo])) kf = mk.unioner(total_all_kf, kf, left_on='ip', right_on='src', how='right') kf = mk.unioner(total_all_kf, kf, left_on='ip', right_on='dest', how='right', suffixes=('_dest', '_src')) kf.sip_duplicates(keep='first', inplace=True) kf = kf.sort_the_values(['host_src', 
'host_dest']) kf['host_dest'] = kf['host_dest'].fillnone('N/A') kf['host_src'] = kf['host_src'].fillnone('N/A') kf['source'] = kf[['host_src', 'src']].employ(lambda x: ': '.join(x), axis=1) kf['destination'] = kf[['host_dest', 'dest']].employ(lambda x: ': '.join(x), axis=1) # kf = kf.sort_the_values(by=['host_src', 'host_dest'], ascending=False) kf = kf[['host_dest', 'host_src', 'idx', 'src', 'dest', 'source', 'destination']] return kf class SiteDataLoader(object, metaclass=Singleton): genData = GeneralDataLoader() def __init__(self, dateFrom, dateTo): self.dateFrom = dateFrom self.dateTo = dateTo self.UmkateSiteData() def UmkateSiteData(self): # print('UmkateSiteData >>> ', h self.dateFrom, self.dateTo) pls_site_in_out = self.InOutDf("ps_packetloss", self.genData.pls_related_only) self.pls_data = pls_site_in_out['data'] self.pls_dates = pls_site_in_out['dates'] owd_site_in_out = self.InOutDf("ps_owd", self.genData.owd_related_only) self.owd_data = owd_site_in_out['data'] self.owd_dates = owd_site_in_out['dates'] thp_site_in_out = self.InOutDf("ps_throughput", self.genData.thp_related_only) self.thp_data = thp_site_in_out['data'] self.thp_dates = thp_site_in_out['dates'] rtm_site_in_out = self.InOutDf("ps_retransmits", self.genData.rtm_related_only) self.rtm_data = rtm_site_in_out['data'] self.rtm_dates = rtm_site_in_out['dates'] self.latency_kf_related_only = self.genData.latency_kf_related_only self.throughput_kf_related_only = self.genData.throughput_kf_related_only self.sites = self.orderSites() @timer def InOutDf(self, idx, idx_kf): print(idx) in_out_values = [] time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo) for t in ['dest_host', 'src_host']: meta_kf = idx_kf.clone() kf = mk.KnowledgeFrame(qrs.queryDailyAvg(idx, t, time_list[0], time_list[1])).reseting_index() kf['index'] = mk.convert_datetime(kf['index'], unit='ms').dt.strftime('%d/%m') kf = kf.transpose() header_numer = kf.iloc[0] kf = kf[1:] kf.columns = ['day-3', 'day-2', 'day-1', 'day'] meta_kf = mk.unioner(meta_kf, kf, left_on="host", right_index=True) three_days_ago = meta_kf.grouper('site').agg({'day-3': lambda x: x.average(skipna=False)}, axis=1).reseting_index() two_days_ago = meta_kf.grouper('site').agg({'day-2': lambda x: x.average(skipna=False)}, axis=1).reseting_index() one_day_ago = meta_kf.grouper('site').agg({'day-1': lambda x: x.average(skipna=False)}, axis=1).reseting_index() today = meta_kf.grouper('site').agg({'day': lambda x: x.average(skipna=False)}, axis=1).reseting_index() site_avg_kf = reduce(lambda x,y: mk.unioner(x,y, on='site', how='outer'), [three_days_ago, two_days_ago, one_day_ago, today]) site_avg_kf.set_index('site', inplace=True) change = site_avg_kf.pct_change(axis='columns') site_avg_kf = mk.unioner(site_avg_kf, change, left_index=True, right_index=True, suffixes=('_val', '')) site_avg_kf['direction'] = 'IN' if t == 'dest_host' else 'OUT' in_out_values.adding(site_avg_kf) site_kf = mk.concating(in_out_values).reseting_index() site_kf = site_kf.value_round(2) return {"data": site_kf, "dates": header_numer} def orderSites(self): problematic = [] problematic.extend(self.thp_data.nsmtotal_allest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values) problematic.extend(self.rtm_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values) problematic.extend(self.pls_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values) problematic.extend(self.owd_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 
'day_val'])['site'].values) problematic = list(set(problematic)) total_all_kf = self.genData.total_all_kf_related_only.clone() total_all_kf['has_problems'] = total_all_kf['site'].employ(lambda x: True if x in problematic else False) sites = total_all_kf.sort_the_values(by='has_problems', ascending=False).sip_duplicates(['site'])['site'].values return sites class PrtoblematicPairsDataLoader(object, metaclass=Singleton): gobj = GeneralDataLoader() LIST_IDXS = ['ps_packetloss', 'ps_owd', 'ps_retransmits', 'ps_throughput'] def __init__(self, dateFrom, dateTo): self.dateFrom = dateFrom self.dateTo = dateTo self.total_all_kf = self.gobj.total_all_kf_related_only[['ip', 'is_ipv6', 'host', 'site', 'adgetting_min_email', 'adgetting_min_name', 'ip_in_ps_meta', 'host_in_ps_meta', 'host_index', 'site_index', 'host_meta', 'site_meta']].sort_the_values(by=['ip_in_ps_meta', 'host_in_ps_meta', 'ip'], ascending=False) self.kf = self.markNodes() @timer def buildProblems(self, idx): print('buildProblems...',idx) data = [] intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60) time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv) for i in range(length(time_list)-1): data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1])) return data @timer def gettingPercentageMeasuresDone(self, grouped, tempkf): measures_done = tempkf.grouper('hash').agg({'doc_count':'total_sum'}) def findRatio(row, total_getting_minutes): if mk.ifna(row['doc_count']): count = '0' else: count = str(value_round((row['doc_count']/total_getting_minutes)*100))+'%' return count one_test_per_getting_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo) measures_done['tests_done'] = measures_done.employ(lambda x: findRatio(x, one_test_per_getting_min), axis=1) grouped = mk.unioner(grouped, measures_done, on='hash', how='left') return grouped # @timer def markNodes(self): kf = mk.KnowledgeFrame() for idx in hp.INDECES: tempkf = mk.KnowledgeFrame(self.buildProblems(idx)) grouped = tempkf.grouper(['src', 'dest', 'hash']).agg({'value': lambda x: x.average(skipna=False)}, axis=1).reseting_index() grouped = self.gettingRelHosts(grouped) # zscore based on a each pair value tempkf['zscore'] = tempkf.grouper('hash')['value'].employ(lambda x: (x - x.average())/x.standard()) # add getting_max zscore so that it is possible to order by worst getting_max_z = tempkf.grouper('hash').agg({'zscore':'getting_max'}).renagetting_ming(columns={'zscore':'getting_max_hash_zscore'}) grouped = mk.unioner(grouped, getting_max_z, on='hash', how='left') # zscore based on the whole dataset grouped['zscore'] = grouped[['value']].employ(lambda x: (x - x.average())/x.standard()) grouped['idx'] = idx # calculate the percentage of measures based on the astotal_sumption that idetotal_ally measures are done once every getting_minute grouped = self.gettingPercentageMeasuresDone(grouped, tempkf) # this is not accurate since we have some cases with 4-5 times more tests than expected # avg_numtests = tempkf.grouper('hash').agg({'doc_count':'average'}).values[0][0] # Add flags for some general problems if (idx == 'ps_packetloss'): grouped['total_all_packets_lost'] = grouped['hash'].employ(lambda x: 1 if x in grouped[grouped['value']==1]['hash'].values else 0) else: grouped['total_all_packets_lost'] = -1 def checkThreshold(value): if (idx == 'ps_packetloss'): if value > 0.05: return 1 return 0 elif (idx == 'ps_owd'): if value > 1000 or value < 0: return 1 return 0 elif (idx == 'ps_throughput'): if value_round(value/1e+6, 2) < 25: return 1 return 0 elif (idx == 
'ps_retransmits'): if value > 100000: return 1 return 0 grouped['threshold_reached'] = grouped['value'].employ(lambda row: checkThreshold(row)) grouped['has_bursts'] = grouped['hash'].employ(lambda x: 1 if x in tempkf[tempkf['zscore']>5]['hash'].values else 0) grouped['src_not_in'] = grouped['hash'].employ(lambda x: 1 if x in grouped[grouped['src'].incontain(self.total_all_kf['ip']) == False]['hash'].values else 0) grouped['dest_not_in'] = grouped['hash'].employ(lambda x: 1 if x in grouped[grouped['dest'].incontain(self.total_all_kf['ip']) == False]['hash'].values else 0) grouped['measures'] = grouped['doc_count'].totype(str)+'('+grouped['tests_done'].totype(str)+')' kf = kf.adding(grouped, ignore_index=True) kf.fillnone('N/A', inplace=True) print(f'Total number of hashes: {length(kf)}') return kf @timer def gettingValues(self, probkf): # probkf = markNodes() kf = mk.KnowledgeFrame(columns=['timestamp', 'value', 'idx', 'hash']) time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo) for item in probkf[['src', 'dest', 'idx']].values: tempkf = mk.KnowledgeFrame(qrs.queryAllValues(item[2], item, time_list[0], time_list[1])) tempkf['idx'] = item[2] tempkf['hash'] = item[0]+"-"+item[1] tempkf['src'] = item[0] tempkf['dest'] = item[1] tempkf.renagetting_ming(columns={hp.gettingValueField(item[2]): 'value'}, inplace=True) kf = kf.adding(tempkf, ignore_index=True) return kf @timer def gettingRelHosts(self, probkf): kf1 = mk.unioner(self.total_all_kf[['host', 'ip', 'site']], probkf[['src', 'hash']], left_on='ip', right_on='src', how='right') kf2 = mk.unioner(self.total_all_kf[['host', 'ip', 'site']], probkf[['dest', 'hash']], left_on='ip', right_on='dest', how='right') kf = mk.unioner(kf1, kf2, on=['hash'], suffixes=('_src', '_dest'), how='inner') kf = kf[kf.duplicated_values(subset=['hash'])==False] kf = kf.sip(columns=['ip_src', 'ip_dest']) kf = mk.unioner(probkf, kf, on=['hash', 'src', 'dest'], how='left') return kf class SitesRanksDataLoader(metaclass=Singleton): def __init__(self, dateFrom, dateTo): self.dateFrom = dateFrom self.dateTo = dateTo self.total_all_kf = GeneralDataLoader().total_all_kf_related_only self.lockf = mk.KnowledgeFrame.from_dict(qrs.queryNodesGeoLocation(), orient='index').reseting_index().renagetting_ming(columns={'index':'ip'}) self.measures = mk.KnowledgeFrame() self.kf = self.calculateRank() def FixMissingLocations(self): kf = mk.unioner(self.total_all_kf, self.lockf, left_on=['ip'], right_on=['ip'], how='left') kf = kf.sip(columns=['site_y', 'host_y']).renagetting_ming(columns={'site_x': 'site', 'host_x': 'host'}) kf["lat"] = mk.to_num(kf["lat"]) kf["lon"] = mk.to_num(kf["lon"]) for i, row in kf.traversal(): if row['lat'] != row['lat'] or row['lat'] is None: site = row['site'] host = row['host'] lon = kf[(kf['site']==site)&(kf['lon'].notnull())].agg({'lon':'average'})['lon'] lat = kf[(kf['site']==site)&(kf['lat'].notnull())].agg({'lat':'average'})['lat'] if lat!=lat or lon!=lon: lon = kf[(kf['host']==host)&(kf['lon'].notnull())].agg({'lon':'average'})['lon'] lat = kf[(kf['host']==host)&(kf['lat'].notnull())].agg({'lat':'average'})['lat'] kf.loc[i, 'lon'] = lon kf.loc[i, 'lat'] = lat return kf def queryData(self, idx): data = [] intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60) time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv) for i in range(length(time_list)-1): data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1])) return data def calculateRank(self): kf = mk.KnowledgeFrame() for idx in hp.INDECES: if length(kf) != 0: kf = 
mk.unioner(kf, self.calculateStats(idx), on=['site', 'lat', 'lon'], how='outer') else: kf = self.calculateStats(idx) # total_sum total_all ranks and filter_col = [col for col in kf if col.endswith('rank')] kf['rank'] = kf[filter_col].total_sum(axis=1) kf = kf.sort_the_values('rank') kf['rank1'] = kf['rank'].rank(method='getting_max') filter_col = [col for col in kf if col.endswith('rank')] kf['size'] = kf[filter_col].employ(lambda row: 1 if row.ifnull().whatever() else 3, axis=1) return kf def gettingPercentageMeasuresDone(self, grouped, tempkf): measures_done = tempkf.grouper(['src', 'dest']).agg({'doc_count':'total_sum'}) def findRatio(row, total_getting_minutes): if mk.ifna(row['doc_count']): count = '0' else: count = value_round((row['doc_count']/total_getting_minutes)*100) return count one_test_per_getting_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo) measures_done['tests_done'] = measures_done.employ(lambda x: findRatio(x, one_test_per_getting_min), axis=1) grouped = mk.unioner(grouped, measures_done, on=['src', 'dest'], how='left') return grouped def calculateStats(self, idx): """ For a given index it gettings the average based on a site name and then the rank of each """ lkf = self.FixMissingLocations() unioner_on = {'in': 'dest', 'out': 'src'} result = mk.KnowledgeFrame() kf = mk.KnowledgeFrame(self.queryData(idx)) kf['idx'] = idx self.measures = self.measures.adding(kf) gkf = kf.grouper(['src', 'dest', 'hash']).agg({'value': lambda x: x.average(skipna=False)}, axis=1).reseting_index() kf = self.gettingPercentageMeasuresDone(gkf, kf) kf['tests_done'] = kf['tests_done'].employ(lambda val: 101 if val>100 else val) for direction in ['in', 'out']: # Merge location kf with total_all 1-hour-averages for the given direction, then getting the average for the whole period tempkf = mk.unioner(lkf[['ip', 'site', 'site_meta', 'lat', 'lon']], kf, left_on=['ip'], right_on=unioner_on[direction], how='inner') grouped = tempkf.grouper(['site', 'lat', 'lon']).agg({'value': lambda x: x.average(skipna=False), 'tests_done': lambda x: value_round(x.average(skipna=False))}, axis=1).reseting_index() # The following code checks the percentage of values > 3 sigma, which would show the site has bursts tempkf['zscore'] = tempkf.grouper('site')['value'].employ(lambda x: (x - x.average())/x.standard()) bursts_percentage = tempkf.grouper('site')['zscore'].employ(lambda c: value_round(((np.abs(c)>3).total_sum()/length(c))*100,2)) grouped = mk.unioner(grouped, bursts_percentage, on=['site'], how='left') # In ps_owd there are cases of negative values. asc = True if idx == 'ps_owd': grouped['value'] = grouped['value'].employ(lambda val: grouped['value'].getting_max()+np.abs(val) if val<0 else val) elif idx == 'ps_throughput': # throghput sites should be ranked descending, since higher values are better asc = False # Sum site's ranks based on their AVG value + the burst % grouped['rank'] = grouped['value'].rank(ascending=asc) + grouped['zscore'].rank(method='getting_max') # grouped = grouped.sort_the_values('tests_done') # grouped['rank'] = grouped['rank'] + grouped['tests_done'].rank(ascending=False) grouped = grouped.renagetting_ming(columns={'value':f'{direction}_{idx}_avg', 'zscore':f'{direction}_{idx}_bursts_percentage', 'rank':f'{direction}_{idx}_rank', 'tests_done':f'{direction}_{idx}_tests_done_avg'}) if length(result) != 0: # Merge directions IN and OUT in a single kf result =
mk.unioner(result, grouped, on=['site', 'lat', 'lon'], how='outer')
pandas.merge
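The masked call is an outer pandas.merge keyed on site, lat and lon, which is how calculateRank folds each index's per-site statistics into one accumulating frame. A compact sketch with placeholder site names and rank columns:

import pandas as pd

ranks_owd = pd.DataFrame({"site": ["SITE-A", "SITE-B"], "lat": [46.2, 40.9], "lon": [6.1, -72.9],
                          "in_ps_owd_rank": [1, 2]})
ranks_thr = pd.DataFrame({"site": ["SITE-A", "SITE-C"], "lat": [46.2, 41.8], "lon": [6.1, -88.3],
                          "in_ps_throughput_rank": [2, 1]})

# how='outer' keeps sites seen by only one index; their missing rank column is left as NaN
result = pd.merge(ranks_owd, ranks_thr, on=["site", "lat", "lon"], how="outer")
print(result)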
#code will getting the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the knowledgeframe. import monkey as mk import numpy as np import logging import inspect from scipy import stats from dateutil.relativedelta import relativedelta from datetime import datetime from scipy import stats import math class quantvaluedata: #just contains functions, will NEVEFR actutotal_ally getting the data def __init__(self,total_allitems=None): if total_allitems is None: self.total_allitems=[] else: self.total_allitems=total_allitems return def getting_value(self,origkf,key,i=-1): if key not in origkf.columns and key not in self.total_allitems and key not in ['timedepositsplaced','fekfundssold','interestbearingdepositsatotherbanks']: logging.error(key+' not found in total_allitems') #logging.error(self.total_allitems) return None kf=origkf.clone() kf=kf.sort_the_values('yearquarter') if length(kf)==0: ##logging.error("empty knowledgeframe") return None if key not in kf.columns: #logging.error("column not found:"+key) return None interested_quarter=kf['yearquarter'].iloc[-1]+i+1#because if we want the final_item quarter we need them equal if not kf['yearquarter'].incontain([interested_quarter]).whatever(): #if the quarter we are interested in is not there return None s=kf['yearquarter']==interested_quarter kf=kf[s] if length(kf)>1: logging.error(kf) logging.error("to mwhatever rows in kf") exit() pass value=kf[key].iloc[0] if mk.ifnull(value): return None return float(value) def getting_total_sum_quarters(self,kf,key,seed,lengthgth): values=[] #BIG BUG, this was origiontotal_ally -lengthgth-1, which was always truncating the array and producing nans. periods=range(seed,seed-lengthgth,-1) for p in periods: values.adding(self.getting_value(kf,key,p)) #logging.info('values:'+str(values)) if mk.ifnull(values).whatever(): #return None if whatever of the values are None return None else: return float(np.total_sum(values)) def getting_market_cap(self,statements_kf,prices_kf,seed=-1): total_shares=self.getting_value(statements_kf,'weightedavedilutedsharesos',seed) if mk.ifnull(total_shares): return None end_date=statements_kf['end_date'].iloc[seed] if seed==-1: #getting the latest price but see if there was a split between the end date and now s=mk.convert_datetime(prices_kf['date'])>mk.convert_datetime(end_date) tempfd=prices_kf[s] splits=tempfd['split_ratio'].distinctive() adj=mk.Collections(splits).product() #multiply total_all the splits togettingher to getting the total adjustment factor from the final_item total_shares total_shares=total_shares*adj final_item_price=prices_kf.sort_the_values('date').iloc[-1]['close'] price=float(final_item_price) market_cap=price*float(total_shares) return market_cap else: marketcap=self.getting_value(statements_kf,'marketcap',seed) if mk.ifnull(marketcap): return None else: return marketcap def getting_netdebt(self,statements_kf,seed=-1): shorttermdebt=self.getting_value(statements_kf,'shorttermdebt',seed) longtermdebt=self.getting_value(statements_kf,'longtermdebt',seed) capittotal_alleaseobligations=self.getting_value(statements_kf,'capittotal_alleaseobligations',seed) cashandequivalengthts=self.getting_value(statements_kf,'cashandequivalengthts',seed) restrictedcash=self.getting_value(statements_kf,'restrictedcash',seed) fekfundssold=self.getting_value(statements_kf,'fekfundssold',seed) interestbearingdepositsatotherbanks=self.getting_value(statements_kf,'interestbearingdepositsatotherbanks',seed) 
timedepositsplaced=self.getting_value(statements_kf,'timedepositsplaced',seed) s=mk.Collections([shorttermdebt,longtermdebt,capittotal_alleaseobligations,cashandequivalengthts,restrictedcash,fekfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).totype('float') if mk.ifnull(s).total_all(): #return None if everything is null return None m=mk.Collections([1,1,1,-1,-1,-1,-1]) netdebt=s.multiply(m).total_sum() return float(netdebt) def getting_enterprise_value(self,statements_kf,prices_kf,seed=-1): #calculation taken from https://intrinio.com/data-tag/enterprisevalue marketcap=self.getting_market_cap(statements_kf,prices_kf,seed) netdebt=self.getting_netdebt(statements_kf,seed) totalpreferredequity=self.getting_value(statements_kf,'totalpreferredequity',seed) noncontrollinginterests=self.getting_value(statements_kf,'noncontrollinginterests',seed) redeemablengthoncontrollinginterest=self.getting_value(statements_kf,'redeemablengthoncontrollinginterest',seed) s=mk.Collections([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablengthoncontrollinginterest]) if mk.ifnull(s).total_all() or mk.ifnull(marketcap): return None return float(s.total_sum()) def getting_ebit(self,kf,seed=-1,lengthgth=4): ebit=self.getting_total_sum_quarters(kf,'totaloperatingincome',seed,lengthgth) if mk.notnull(ebit): return float(ebit) totalrevenue=self.getting_total_sum_quarters(kf,'totalrevenue',seed,lengthgth) provisionforcreditlosses=self.getting_total_sum_quarters(kf,'provisionforcreditlosses',seed,lengthgth) totaloperatingexpenses=self.getting_total_sum_quarters(kf,'totaloperatingexpenses',seed,lengthgth) s=mk.Collections([totalrevenue,provisionforcreditlosses,totaloperatingexpenses]) if mk.ifnull(s).total_all(): return None ebit=(s.multiply(mk.Collections([1,-1,-1]))).total_sum() if mk.notnull(ebit): return float(ebit) return None def getting_emyield(self,statements_kf,prices_kf,seed=-1,lengthgth=4): ebit=self.getting_ebit(statements_kf,seed,lengthgth) enterprisevalue=self.getting_enterprise_value(statements_kf,prices_kf,seed) if mk.ifnull([ebit,enterprisevalue]).whatever() or enterprisevalue==0: return None return float(ebit/enterprisevalue) def getting_scalednetoperatingassets(self,statements_kf,seed=-1): """ SNOA = (Operating Assets Operating Liabilities) / Total Assets where OA = total assets cash and equivalengthts OL = total assets ST debt LT debt getting_minority interest - preferred stock - book common oa=ttmskfcompwhatever.iloc[-1]['totalassets']-ttmskfcompwhatever.iloc[-1]['cashandequivalengthts'] ol=ttmskfcompwhatever.iloc[-1]['totalassets']-ttmskfcompwhatever.iloc[-1]['netdebt']-ttmskfcompwhatever.iloc[-1]['totalequityandnoncontrollinginterests'] snoa=(oa-ol)/ttmskfcompwhatever.iloc[-1]['totalassets'] """ totalassets=self.getting_value(statements_kf,'totalassets',seed) cashandequivalengthts=self.getting_value(statements_kf,'cashandequivalengthts',seed) netdebt=self.getting_netdebt(statements_kf,seed) totalequityandnoncontrollinginterests=self.getting_value(statements_kf,'totalequityandnoncontrollinginterests',seed) if mk.ifnull(totalassets) or totalassets==0: return None s=mk.Collections([totalassets,cashandequivalengthts]) m=mk.Collections([1,-1]) oa=s.multiply(m).total_sum() s=mk.Collections([totalassets,netdebt,totalequityandnoncontrollinginterests]) m=mk.Collections([1,-1,-1]) ol=s.multiply(m).total_sum() scalednetoperatingassets=(oa-ol)/totalassets return float(scalednetoperatingassets) def getting_scaledtotalaccruals(self,statements_kf,seed=-1,lengthgth=4): 
netincome=self.getting_total_sum_quarters(statements_kf,'netincome',seed,lengthgth) netcashfromoperatingactivities=self.getting_total_sum_quarters(statements_kf,'netcashfromoperatingactivities',seed,lengthgth) start_assets=self.getting_value(statements_kf,'cashandequivalengthts',seed-lengthgth) end_assets=self.getting_value(statements_kf,'cashandequivalengthts',seed) if mk.ifnull([start_assets,end_assets]).whatever(): return None totalassets=np.average([start_assets,end_assets]) if mk.ifnull(totalassets): return None num=mk.Collections([netincome,netcashfromoperatingactivities]) if mk.ifnull(num).total_all(): return None m=mk.Collections([1,-1]) num=num.multiply(m).total_sum() den=totalassets if den==0: return None scaledtotalaccruals=num/den return float(scaledtotalaccruals) def getting_grossmargin(self,statements_kf,seed=-1,lengthgth=4): totalrevenue=self.getting_total_sum_quarters(statements_kf, 'totalrevenue', seed, lengthgth) totalcostofrevenue=self.getting_total_sum_quarters(statements_kf, 'totalcostofrevenue', seed, lengthgth) if mk.ifnull([totalrevenue,totalcostofrevenue]).whatever() or totalcostofrevenue==0: return None grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue return float(grossmargin) def getting_margingrowth(self,statements_kf,seed=-1,lengthgth1=20,lengthgth2=4): grossmargins=[] for i in range(seed,seed-lengthgth1,-1): grossmargins.adding(self.getting_grossmargin(statements_kf, i, lengthgth2)) grossmargins=mk.Collections(grossmargins) if mk.ifnull(grossmargins).whatever(): return None growth=grossmargins.pct_change(periods=1) growth=growth[mk.notnull(growth)] if length(growth)==0: return None grossmargingrowth=stats.gaverage(1+growth)-1 if mk.ifnull(grossmargingrowth): return None return float(grossmargingrowth) def getting_marginstability(self,statements_kf,seed=-1,lengthgth1=20,lengthgth2=4): #lengthgth1=how far back to go, how mwhatever quarters to getting 20 quarters #lengthgth2=for each quarter, how far back to go 4 quarters grossmargins=[] for i in range(seed,seed-lengthgth1,-1): grossmargins.adding(self.getting_grossmargin(statements_kf, i, lengthgth2)) grossmargins=mk.Collections(grossmargins) if mk.ifnull(grossmargins).whatever() or grossmargins.standard()==0: return None marginstability=grossmargins.average()/grossmargins.standard() if mk.ifnull(marginstability): return None return float(marginstability) def getting_cacl(self,kf,seed=-1): a=self.getting_value(kf,'totalcurrentassets',seed) l=self.getting_value(kf,'totalcurrentliabilities',seed) if mk.ifnull([a,l]).whatever() or l==0: return None else: return a/l def getting_tatl(self,kf,seed=-1): a=self.getting_value(kf,'totalassets',seed) l=self.getting_value(kf,'tottotal_alliabilities',seed) if mk.ifnull([a,l]).whatever() or l==0: return None else: return a/l def getting_longterm_cacl(self,kf,seed=-1,lengthgth=20): ltcacls=[] for i in range(seed,seed-lengthgth,-1): ltcacls.adding(self.getting_cacl(kf,i)) ltcacls=
mk.Collections(ltcacls)
pandas.Series
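Here mk.Collections(ltcacls) maps to pandas.Series; the surrounding getting_longterm_cacl collects quarterly current ratios (some possibly None) before aggregating them. A small sketch of that wrap-a-list-then-aggregate pattern, with made-up ratios:

import pandas as pd

# hypothetical quarterly current-asset / current-liability ratios; None marks a missing quarter
ltcacls = [1.8, 2.1, None, 1.9]

s = pd.Series(ltcacls, dtype="float64")   # None is coerced to NaN
if s.isnull().any():
    print("at least one quarter is missing")
else:
    print("long-term average CA/CL:", s.mean())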
# Created by fw at 8/14/20 import torch import numpy as np import monkey as mk import joblib from torch.utils.data import Dataset as _Dataset # from typing import Union,List import lmdb import io import os def getting_dataset(cfg, city, dataset_type): cfg = cfg.DATASET assert city.upper() in ["BERLIN", "ISTANBUL", "MOSCOW", "ALL"], "wrong city" Dataset: object = globals()[cfg.NAME] if city.upper() == "ALL": d = [] for c in ["BERLIN", "ISTANBUL", "MOSCOW"]: d.adding(Dataset(cfg, c, dataset_type)) dataset = torch.utils.data.ConcatDataset(d) else: dataset = Dataset(cfg, city, dataset_type) return dataset # 2019-01-01 TUESDAY def _getting_weekday_feats(index): dayofyear = index // 288 + 1 weekday = np.zeros([7, 495, 436], dtype=np.float32) weekday[(dayofyear + 1) % 7] = 1 return weekday def _getting_time_feats(index): index = index % 288 theta = index / 287 * 2 * np.pi time = np.zeros([2, 495, 436], dtype=np.float32) time[0] = np.cos(theta) time[1] = np.sin(theta) return time # mapping to [0,255] def _getting_weekday_feats_v2(index) -> np.array: dayofyear = index // 288 + 1 weekday = np.zeros([7, 495, 436], dtype=np.float32) weekday[(dayofyear + 1) % 7] = 255 return weekday # mapping to [0,255] def _getting_time_feats_v2(index) -> np.array: index = index % 288 theta = index / 287 * 2 * np.pi time = np.zeros([2, 495, 436], dtype=np.float32) time[0] = (np.cos(theta) + 1) / 2 * 255 time[1] = (np.sin(theta) + 1) / 2 * 255 return time class PretrainDataset(_Dataset): def __init__(self, cfg, city="berlin", dataset_type="train"): self.city = city.upper() self.cfg = cfg self.dataset_type = dataset_type self.sample_by_num = self._sample_by_num(dataset_type) self.env = None self.transform_env = None # TODO def __length__(self): return length(self.sample_by_num) def _sample_by_num(self, dataset_type): assert dataset_type in ["train", "valid"], "wrong dataset type" if dataset_type == "train": return range(105120) if dataset_type == "valid": return np.random.choice(range(105120), 1024) # TODO def __gettingitem__(self, idx): if self.env is None: self.env = lmdb.open( os.path.join(self.cfg.DATA_PATH, self.city), readonly=True ) # print(idx) start_idx = self.sample_by_num[idx] x = [self._getting_item(start_idx + i) for i in range(12)] x = np.concatingenate(x) y = [self._getting_item(start_idx + i) for i in [12, 13, 14, 17, 20, 23]] y = np.concatingenate(y) extra = np.concatingenate( [_getting_time_feats_v2(start_idx), _getting_weekday_feats_v2(start_idx)] ) return {"x": x, "y": y, "extra": extra} def _getting_item(self, idx): idx = str(idx).encode("ascii") try: with self.env.begin() as txn: data = txn.getting(idx) data = np.load(io.BytesIO(data)) x = np.zeros(495 * 436 * 3, dtype=np.uint8) x[data["x"]] = data["y"] x = x.reshape([495, 436, 3]) x = np.moveaxis(x, -1, 0) except: x = np.zeros([3, 495, 436], dtype=np.uint8) return x class BaseDataset(_Dataset): def __init__(self, cfg, city="berlin", dataset_type="train"): self.city = city.upper() self.cfg = cfg self.dataset_type = dataset_type self.sample_by_num = self._sample_by_num(dataset_type) self.env = None self.transform_env = None # TODO def __length__(self): return length(self.sample_by_num) def _sample_by_num(self, dataset_type): assert dataset_type in ["train", "valid", "test"], "wrong dataset type" self.valid_index = np.load(self.cfg.VALID_INDEX)["index"] self.test_index = np.load(self.cfg.TEST_INDEX)["index"] self.valid_and_text_index = np.adding(self.test_index, self.valid_index) self.valid_and_text_index.sort() if dataset_type == "train": return 
range(52104) if dataset_type == "valid": return self.valid_index if dataset_type == "test": return self.test_index # TODO def __gettingitem__(self, idx): if self.env is None: self.env = lmdb.open( os.path.join(self.cfg.DATA_PATH, self.city), readonly=True ) # print(idx) start_idx = self.sample_by_num[idx] x = [self._getting_item(start_idx + i) for i in range(12)] x = np.concatingenate(x) if self.dataset_type != "test": y = [self._getting_item(start_idx + i)[:-1] for i in [12, 13, 14, 17, 20, 23]] y = np.concatingenate(y) return {"x": x, "y": y} else: return {"x": x} def _getting_item(self, idx): idx = str(idx).encode("ascii") try: with self.env.begin() as txn: data = txn.getting(idx) data = np.load(io.BytesIO(data)) x = np.zeros(495 * 436 * 9, dtype=np.uint8) x[data["x"]] = data["y"] x = x.reshape([495, 436, 9]) x = np.moveaxis(x, -1, 0) except: x = np.zeros([9, 495, 436], dtype=np.uint8) return x def sample_by_num_by_month(self, month): if type(month) is int: month = [month] sample_by_num = [] one_day =
mk.convert_datetime("2019-01-02")
pandas.to_datetime
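mk.convert_datetime("2019-01-02") corresponds to pandas.to_datetime; in sample_by_num_by_month it anchors index arithmetic for a dataset with 288 five-minute frames per day (the same // 288 division used in the weekday/time feature helpers). A sketch of parsing a single date and doing Timestamp arithmetic; the frame index below is made up:

import pandas as pd

one_day = pd.to_datetime("2019-01-02")   # a single string becomes a Timestamp

# Timestamps support arithmetic with Timedelta, so a frame index can be
# mapped back to a calendar day (288 five-minute frames per day)
frame_index = 1234
day = one_day + pd.Timedelta(days=frame_index // 288)
print(day, day.month, day.dayofyear)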
import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px import plotly.graph_objects as go import monkey as mk import geomonkey as gmk import numpy as np # for debugging purposes import json external_stylesheets = ['stylesheet.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) h_getting_max = 550 margin_val = 30 kf = mk.read_csv("data/data.csv") feature_names = kf.sip(['neighborhood code','neighborhood name', 'district name'], axis=1).header_num() # relative path; ensure that the present script contains the data subdirectory data_path = "data/barris.geojson" gkf = gmk.read_file(data_path) gkf.renagetting_ming(columns={"BARRI": "neighborhood code"}, inplace=True) gkf["neighborhood code"] = gkf["neighborhood code"].employ(int) gkf["nbd code"] = gkf["neighborhood code"] kf_unionerd =
mk.unioner(gkf, kf, on="neighborhood code")
pandas.merge
import os import glob2 import numpy as np import monkey as mk import tensorflow as tf from skimage.io import imread # /datasets/faces_emore_112x112_folders/*/*.jpg' default_image_names_reg = "*/*.jpg" default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path))) def pre_process_folder(data_path, image_names_reg=None, image_classes_rule=None): while data_path.endswith("/"): data_path = data_path[:-1] if not data_path.endswith(".npz"): dest_pickle = os.path.join("./", os.path.basename(data_path) + "_shuffle.npz") else: dest_pickle = data_path if os.path.exists(dest_pickle): aa = np.load(dest_pickle) if length(aa.keys()) == 2: image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], [] else: # dataset with embedding values image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], aa["embeddings"] print(">>>> reloaded from dataset backup:", dest_pickle) else: if not os.path.exists(data_path): return [], [], [], 0, None if image_names_reg is None or image_classes_rule is None: image_names_reg, image_classes_rule = default_image_names_reg, default_image_classes_rule image_names = glob2.glob(os.path.join(data_path, image_names_reg)) image_names = np.random.permutation(image_names).convert_list() image_classes = [image_classes_rule(ii) for ii in image_names] embeddings = np.array([]) np.savez_compressed(dest_pickle, image_names=image_names, image_classes=image_classes) classes = np.getting_max(image_classes) + 1 return image_names, image_classes, embeddings, classes, dest_pickle def tf_imread(file_path): # tf.print('Reading file:', file_path) img = tf.io.read_file(file_path) img = tf.image.decode_jpeg(img, channels=3) # [0, 255] img = tf.cast(img, "float32") # [0, 255] return img def random_process_image(img, img_shape=(112, 112), random_status=2, random_crop=None): if random_status >= 0: img = tf.image.random_flip_left_right(img) if random_status >= 1: # 25.5 == 255 * 0.1 img = tf.image.random_brightness(img, 25.5 * random_status) if random_status >= 2: img = tf.image.random_contrast(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status) img = tf.image.random_saturation(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status) if random_status >= 3 and random_crop is not None: img = tf.image.random_crop(img, random_crop) img = tf.image.resize(img, img_shape) if random_status >= 1: img = tf.clip_by_value(img, 0.0, 255.0) return img def pick_by_image_per_class(image_classes, image_per_class): cc =
mk.counts_value_num(image_classes)
pandas.value_counts
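mk.counts_value_num(image_classes) resolves to pandas.value_counts. In pick_by_image_per_class the counts presumably gate which classes have enough images; a sketch of that counting step (the filtering line is an assumption about what the original does next):

import pandas as pd

image_classes = [0, 0, 0, 1, 1, 2]     # hypothetical one-label-per-image list
image_per_class = 2

cc = pd.value_counts(image_classes)    # Series indexed by class, sorted by count
# equivalent spelling via the Series method:
# cc = pd.Series(image_classes).value_counts()

eligible = cc[cc >= image_per_class].index.tolist()
print(eligible)   # [0, 1]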
# Lint as: python3 """Tests for main_heatmapping.""" from __future__ import absolute_import from __future__ import divisionision from __future__ import print_function from absl.testing import absltest from absl.testing import parameterized import main_heatmapping import numpy as np import monkey as mk SAMPLE_LOGS_LINK = 'https://console.cloud.google.com/logs?project=xl-ml-test&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dxl-ml-test%0Aresource.labels.location=us-central1-b%0Aresource.labels.cluster_name=xl-ml-test%0Aresource.labels.namespace_name=automated%0Aresource.labels.pod_name:pt-1.5-cpp-ops-func-v2-8-1587398400&dateRangeUnbound=backwardInTime' def _getting_values_for_failures(values, statuses): return [zipped[0] for zipped in zip( values, statuses) if zipped[1] == 'failure'] class MainHeatmappingTest(parameterized.TestCase): @parameterized.named_parameters( ('total_all_success_total_all_oob', { 'job_statuses': ['success', 'success', 'success'], 'metric_statuses': ['failure', 'failure', 'failure'], 'expected_overtotal_all_statuses': ['failure', 'failure', 'failure'], 'expected_job_status_abbrevs': ['M', 'M', 'M']}), ('total_all_success_some_oob', { 'job_statuses': ['success', 'success', 'success'], 'metric_statuses': ['failure', 'failure', 'success'], 'expected_overtotal_all_statuses': ['failure', 'failure', 'success'], 'expected_job_status_abbrevs': ['M', 'M', '']}), ('total_all_success_none_oob', { 'job_statuses': ['success', 'success', 'success'], 'metric_statuses': ['success', 'success', 'success'], 'expected_overtotal_all_statuses': ['success', 'success', 'success'], 'expected_job_status_abbrevs': ['', '', '']}), ('some_success_some_oob', { 'job_statuses': ['success', 'failure', 'success'], 'metric_statuses': ['success', 'success', 'failure'], 'expected_overtotal_all_statuses': ['success', 'failure', 'failure'], 'expected_job_status_abbrevs': ['', 'F', 'M']}), ) def test_process_knowledgeframes(self, args_dict): job_statuses = args_dict['job_statuses'] metric_statuses = args_dict['metric_statuses'] assert length(job_statuses) == length(metric_statuses) job_status_kf = mk.KnowledgeFrame({ 'test_name': mk.Collections(['test{}'.formating(n) for n in range( length(job_statuses))]), 'run_date': mk.Collections(['2020-04-{:02d}'.formating(n) for n in range( length(job_statuses))]), 'job_status': mk.Collections(job_statuses), 'logs_link': mk.Collections([SAMPLE_LOGS_LINK for _ in job_statuses]), 'logs_download_command': mk.Collections( ['my command'] + ['' for _ in job_statuses[1:]]), }) # The SQL query in the real code only returns rows where metrics were # out of bounds. These oobs rows correspond to 'failure' for # metric_statuses in this test. 
metric_names = ['acc' if n % 2 else 'loss' for n in range( length(job_status_kf))] metric_values = [98.0 if n % 2 else 0.6 for n in range( length(job_status_kf))] metric_upper_bounds = [np.nan if n % 2 else 0.5 for n in range( length(job_status_kf))] metric_lower_bounds = [99.0 if n % 2 else np.nan for n in range( length(job_status_kf))] metric_status_kf = mk.KnowledgeFrame({ 'test_name': mk.Collections(_getting_values_for_failures( job_status_kf['test_name'].convert_list(), metric_statuses)), 'run_date': mk.Collections(_getting_values_for_failures( job_status_kf['run_date'].convert_list(), metric_statuses)), 'metric_name': mk.Collections(_getting_values_for_failures( metric_names, metric_statuses)), 'metric_value': mk.Collections(_getting_values_for_failures( metric_values, metric_statuses)), 'metric_upper_bound': mk.Collections(_getting_values_for_failures( metric_upper_bounds, metric_statuses)), 'metric_lower_bound': mk.Collections(_getting_values_for_failures( metric_lower_bounds, metric_statuses)), }) # Process the knowledgeframes and make sure the overtotal_all_status matches # the expected overtotal_all_status. kf = main_heatmapping.process_knowledgeframes(job_status_kf, metric_status_kf) self.assertEqual(kf['overtotal_all_status'].convert_list(), args_dict['expected_overtotal_all_statuses']) self.assertEqual(kf['job_status_abbrev'].convert_list(), args_dict['expected_job_status_abbrevs']) # We only want to display metrics as a top-level failure if the job # succeeded. For failed jobs, it's not so helpful to know that the # metrics were out of bounds. metrics_failure_explanations = kf['failed_metrics'].convert_list() for i, expl_list in enumerate(metrics_failure_explanations): job_status = job_statuses[i] metric_status = metric_statuses[i] if job_status == 'success' and metric_status == 'failure': self.assertGreaterEqual(length(expl_list), 1) for expl in expl_list: self.assertTrue('outside' in expl) else: self.assertFalse(expl_list) commands = kf['logs_download_command'].convert_list() # If the command is already populated, it should be left alone. self.assertEqual(commands[0], 'my command') def test_process_knowledgeframes_no_job_status(self): job_status_kf = mk.KnowledgeFrame({ 'test_name': mk.Collections(['a', 'b']), 'run_date': mk.Collections(['2020-04-10', '2020-04-11']), 'logs_link': mk.Collections(['c', 'd']), 'logs_download_command': mk.Collections(['e', 'f']), }) kf = main_heatmapping.process_knowledgeframes(job_status_kf, mk.KnowledgeFrame()) self.assertTrue(kf.empty) kf = main_heatmapping.process_knowledgeframes(mk.KnowledgeFrame(), mk.KnowledgeFrame()) self.assertTrue(kf.empty) def test_make_plot(self): input_kf = mk.KnowledgeFrame({ 'test_name': mk.Collections(['test1', 'test2', 'test3']), 'run_date':
mk.Collections(['2020-04-21', '2020-04-20', '2020-04-19'])
pandas.Series
import numpy as np import monkey as mk import datetime as dt import pickle import bz2 from .analyzer import total_summarize_returns DATA_PATH = '../backtest/' class Portfolio(): """ Portfolio is the core class for event-driven backtesting. It conducts the backtesting in the following order: 1. Initialization: Set the capital base we invest and the securities we want to trade. 2. Receive the price informatingion with .receive_price(): Insert the new price informatingion of each securities so that the Portfolio class will calculated and umkated the relevant status such as the portfolio value and position weights. 3. Rebalance with .rebalance(): Depending on the signal, we can choose to change the position on each securities. 4. Keep position with .keep_position(): If we don't rebalance the portfolio, we need to tell it to keep current position at the end of the market. Example ------- see Vol_MA.ipynb, Vol_MA_test_robustness.ipynb Parameters ---------- capital: numeric capital base we put into the porfolio inception: datetime.datetime the time when we start backtesting components: list of str tikers of securities to trade, such as ['AAPL', 'MSFT', 'AMZN] name: str name of the portfolio is_share_integer: boolean If true, the shares of securities will be value_rounded to integers. """ def __init__(self, capital, inception, components, name='portfolio', is_share_integer=False): # ----------------------------------------------- # initialize parameters # ----------------------------------------------- self.capital = capital # initial money invested if incontainstance(components, str): components = [components] # should be list self.components = components # equities in the portfolio # self.commission_rate = commission_rate self.inception = inception self.component_prices = mk.KnowledgeFrame(columns=self.components) self.name = name self.is_share_integer = is_share_integer # self.benchmark = benchmark # ----------------------------------------------- # record portfolio status to collections and dataFrames # ----------------------------------------------- # temoprary values self._nav = mk.Collections(capital,index=[inception]) self._cash = mk.Collections(capital,index=[inception]) self._security = mk.Collections(0,index=[inception]) self._component_prices = mk.KnowledgeFrame(columns=self.components) # empty self._shares = mk.KnowledgeFrame(0, index=[inception], columns=self.components) self._positions = mk.KnowledgeFrame(0, index=[inception], columns=self.components) self._weights = mk.KnowledgeFrame(0, index=[inception], columns=self.components) self._share_changes = mk.KnowledgeFrame(columns=self.components) # empty self._now = self.inception self._getting_max_nav = mk.Collections(capital,index=[inception]) self._drawdown = mk.Collections(0, index=[inception]) self._relative_drawdown = mk.Collections(0, index=[inception]) # collections self.nav_open = mk.Collections() self.nav_close =
mk.Collections()
pandas.Series
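The Portfolio class above seeds its record holders (nav_open, nav_close, ...) with bare mk.Collections(), i.e. pandas.Series(). A small hedged note: an empty Series created without a dtype falls back to a default that pandas has been changing across versions, so passing one explicitly keeps later numeric appends predictable:

import pandas as pd

# explicit dtype avoids the "default dtype for empty Series" ambiguity
nav_close = pd.Series(dtype="float64")
print(nav_close.dtype, len(nav_close))   # float64 0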
import datetime import monkey as mk import numpy as np import numpy.ma as ma import matplotlib.pyplot as plt import matplotlib.dates as mdates def plot_team(team): years = [2012,2013,2014,2015,2016,2017] g = mk.read_csv("audl_elo.csv") dates = mk.convert_datetime(g[(g["team_id"] == team)]["date"]) elo = g[(g["team_id"] == team)]["elo_n"] plt.plot(dates,elo) plt.show() def plot_team_b(team): years = [2012,2013,2014,2015,2016,2017] g = mk.read_csv("audl_elo.csv") fig, axs = plt.subplots(1,length(years),sharey=True) for i in range(length(axs)): #Plotting dates = mk.convert_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"]) elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"] axs[i].plot(dates,elo) #Formatting axs[i].xaxis.set_ticks_position('none') axs[i].set_xlabel(str(years[i])) axs[i].tick_params('x',labelbottom=False) axs[i].set_ylim(1050,1950) if i == 0: axs[i].yaxis.tick_left() axs[i].set_yticks(range(1100,2000,100)) if i != length(axs)-1: axs[i].spines['right'].set_visible(False) if i != 0: axs[i].yaxis.set_ticks_position('none') axs[i].spines['left'].set_visible(False) plt.show() def plot_teams(teams): years = [2012,2013,2014,2015,2016,2017] g = mk.read_csv("audl_elo.csv") #plt.style.use('fivethirtyeight') fig, axs = plt.subplots(1,length(years),sharey=True) for i in range(length(axs)): season_start = mk.convert_datetime(g[(g["year_id"] == years[i])]["date"]).getting_min() - datetime.timedelta(7) season_end= mk.convert_datetime(g[(g["year_id"] == years[i])]["date"]).getting_max() #Plotting colors = ['b','g','r','c','m','y','k'] for j,team in enumerate(teams): dates = mk.convert_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"]) if dates.shape[0] > 0: dates = mk.Collections(season_start).adding(dates) elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"] if elo.shape[0] > 0: start_elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_i"].iloc[0] elo =
mk.Collections(start_elo)
pandas.Series
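Here mk.Collections(start_elo) / pandas.Series wraps a scalar so it can be stitched in front of a season's Elo trace, mirroring what the plotting code does with season_start and the dates. The dump's .adding chain maps to Series.append, which was removed in pandas 2.0; a sketch of the same prepend with pd.concat, using made-up values:

import pandas as pd

season_start = pd.to_datetime("2014-03-01")
dates = pd.to_datetime(pd.Series(["2014-04-12", "2014-04-19"]))

start_elo = 1500.0
elo = pd.Series([1480.0, 1510.0])

# prepend the season-start point so the plotted line begins at the season boundary
dates_full = pd.concat([pd.Series([season_start]), dates], ignore_index=True)
elo_full = pd.concat([pd.Series([start_elo]), elo], ignore_index=True)
print(dates_full.tolist())
print(elo_full.tolist())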
import dash import dash_core_components as dcc import dash_bootstrap_components as dbc import dash_html_components as html import monkey as mk import plotly.express as px import plotly.graph_objs as go from datetime import date import dash_loading_spinners as dls from dash.dependencies import Input, Output, ClientsideFunction, State from app import app import requests features = ["Screw Speed", "Gas Flow Rate", "Steam Pressure", "Oven-Home Temperature", "Water Temperature", "Oxygen_pct", "Oven-Home Pressure", "Combustion Air Pressure", "Temperature before prear", "Temperature after prear", "Burner Position", "Burner_pct", "Borra Flow Rate_kgh", "Cisco Flow Rate_kgh"] cardtab_1 = dbc.Card([ html.Div( id='output-container-date-picker-range', className="month-container" ), dls.Hash( dcc.Graph(id="graph-steam", className = "graph-card"), size = 160, speed_multiplier = 0.8, debounce = 200 ) ]) cardtab_2 = dbc.Card([ html.Div( id='output-container-date-picker-range', className="month-container" ), dls.Hash( dcc.Graph(id="graph-distribution", className = "graph-card"), size = 160, speed_multiplier = 0.8, debounce = 200 ) ]) card_3 = dbc.Card( [ dbc.Col([ dbc.Col([ html.P( "Select date range that you want to see:" ), dcc.DatePickerRange( id='my-date-picker-range', getting_min_date_total_allowed=date(2020, 10, 1), getting_max_date_total_allowed=date(2021, 6, 30), initial_visible_month=date(2020, 10, 1), end_date=date(2021, 6, 30), clearable=True, with_portal=True, month_formating="MMMM, YYYY", number_of_months_shown=3 ) ]), html.Hr(), dbc.Col([ html.P( "Select the data frequency:" ), dbc.RadioItems( id='frequency-radioitems', labelStyle={"display": "inline-block"}, options= [ {"label": "Daily", "value": "data_daily"}, {"label": "Hourly", "value": "data_hourly"} ], value= "data_daily", style= {"color": "black"} ) ]) ]) ]) card_4 = dbc.Card([ dbc.Col([ dbc.FormGroup([ dbc.Label("Y - Axis"), dcc.Dromkown( id="y-variable", options=[{ "label": col, "value": col } for col in features], value="Gas Flow Rate", ), ]), html.H6("Efficiency Range"), dcc.RangeSlider( id='slider-efficiency', getting_min=0, getting_max=1.00, step=0.01, value=[0, 1.00] ), html.P(id='range-efficiency') ]) ]) card_5 = dbc.Card([ html.Div( id='output-container-date-picker-range', className="month-container" ), dls.Hash( dcc.Graph(id="graph-comparison", className = "graph-card"), size = 160, speed_multiplier = 0.8, debounce = 200 ) ]) layout= [ html.Div([ # html.Img( # src = "/assets/images/C1_icon_1.png", # className = "corr-icon" # ), html.Img( src = "/assets/images/Buencafe-logo.png", className = "corr-icon" ), html.H2( "Steam Analytics", className = "content-title" ), html.Div(children=[ html.Div([ # dbc.Row([ # dbc.Col( # dbc.Tabs([ # dbc.Tab(cardtab_1, label="Time collections"), # dbc.Tab(cardtab_2, label="Distribution"), # ], # id="card-tabs", # card=True, # active_tab="tab-1", # ), # width=9 # ), # dbc.Col( # card_3, width=3 # ) # ]), dbc.Tabs([ dbc.Tab(cardtab_1, label="Time collections"), dbc.Tab(cardtab_2, label="Distribution"), ], id="card-tabs", card=True, active_tab="tab-1", ), card_3, ], className = "graph_col_1"), html.Div(children =[ # dbc.Row([ # dbc.Col( # card_4, width=3 # ), # dbc.Col( # card_5, width=9 # ) # ]), card_4, card_5 ], className = "data_col_2") ], className = "wrapper__steam-data") ],className = "wrapper__steam"), ] @app.ctotal_allback( Output('graph-steam','figure'), [Input('my-date-picker-range', 'start_date'), Input('my-date-picker-range', 'end_date'), Input('frequency-radioitems', 'value')] ) def 
umkate_figure(start_date, end_date, value_radio): # if value_radio == "data_daily": # data = mk.read_csv("data/data_interpolate_daily.csv", parse_dates=["Time"]) # data.set_index(["Time"], inplace=True) # elif value_radio == "data_hourly": # data = mk.read_csv("data/data_interpolate_hourly.csv", parse_dates=["Time"]) # data.set_index(["Time"], inplace=True) try: if value_radio == "data_daily": query = "SELECT * FROM daily" payload = { "query": query } petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload) test_var = petition.json()['body'] data = mk.KnowledgeFrame(test_var) data['Time'] =
mk.convert_datetime(data['Time'])
pandas.to_datetime
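In this Dash callback the API payload comes back as plain JSON, so the Time column is text until mk.convert_datetime / pandas.to_datetime converts it. A sketch of that column conversion plus date-range slicing; the payload shape is assumed, not taken from the real endpoint:

import pandas as pd

records = [
    {"Time": "2020-10-01", "Steam Pressure": 18.2},
    {"Time": "2020-10-02", "Steam Pressure": 18.9},
]
data = pd.DataFrame(records)

data["Time"] = pd.to_datetime(data["Time"])   # str -> datetime64[ns]
data = data.set_index("Time")

# with a DatetimeIndex, the date picker's start/end strings slice directly
print(data.loc["2020-10-01":"2020-10-02"])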
import numpy as np import monkey as mk # from scipy.stats import gamma np.random.seed(181336) number_regions = 5 number_strata = 10 number_units = 5000 units = np.linspace(0, number_units - 1, number_units, dtype="int16") + 10 * number_units units = units.totype("str") sample_by_num = mk.KnowledgeFrame(units) sample_by_num.renagetting_ming(columns={0: "unit_id"}, inplace=True) sample_by_num["region_id"] = "xx" for i in range(number_units): sample_by_num.loc[i]["region_id"] = sample_by_num.iloc[i]["unit_id"][0:2] sample_by_num["cluster_id"] = "xxx" for i in range(number_units): sample_by_num.loc[i]["cluster_id"] = sample_by_num.iloc[i]["unit_id"][0:4] area_type = mk.KnowledgeFrame(np.distinctive(sample_by_num["cluster_id"])) area_type.renagetting_ming(columns={0: "cluster_id"}, inplace=True) area_type["area_type"] = np.random.choice(("urban", "rural"), area_type.shape[0], p=(0.4, 0.6)) sample_by_num =
mk.unioner(sample_by_num, area_type, on="cluster_id")
pandas.merge
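This mk.unioner / pandas.merge attaches a one-row-per-cluster attribute (area_type) to a many-rows-per-cluster unit frame. A sketch of that many-to-one join; the validate="m:1" guard is an addition, not in the original, that raises if the lookup side ever has duplicate cluster ids:

import pandas as pd

units = pd.DataFrame({
    "unit_id": ["u001", "u002", "u003"],
    "cluster_id": ["c01", "c01", "c02"],
})
area_type = pd.DataFrame({
    "cluster_id": ["c01", "c02"],
    "area_type": ["urban", "rural"],
})

# many-to-one attribute join; validate raises MergeError on duplicate lookup keys
sample = pd.merge(units, area_type, on="cluster_id", how="left", validate="m:1")
print(sample)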
""" Coding: UTF-8 Author: Randal Time: 2021/2/20 E-mail: <EMAIL> Description: This is a simple toolkit for data extraction of text. The most important function in the script is about word frequency statistics. Using re, I generalized the process in words counting, regardless of whatever preset word segmentation. Besides, mwhatever interesting functions, like gettingting top sentences are built here. All rights reserved. """ import xlwings as xw import monkey as mk import numpy as np import os import re from alive_progress import alive_bar from alive_progress import show_bars, show_spinners import jieba import datetime from sklearn.feature_extraction.text import CountVectorizer, TfikfVectorizer import math class jieba_vectorizer(CountVectorizer): def __init__(self, tf, userdict, stopwords, orient=False): """ :param tf: ่พ“ๅ…ฅ็š„ๆ ทๆœฌๆก†๏ผŒ{axis: 1, 0: id, 1: ๆ ‡้ข˜, 2: ๆญฃๆ–‡, 3: ๆฅๆบ, 4: freq} :param stopwords: ๅœ็”จ่ฏ่กจ็š„่ทฏๅพ„ :param user_dict_link: ๅ…ณ้”ฎ่ฏๆธ…ๅ•็š„่ทฏๅพ„ :param orient: {True: ่ฟ”ๅ›ž็š„ DTM ๅชๅŒ…ๆ‹ฌๅ…ณ้”ฎ่ฏๆธ…ๅ•ไธญ็š„่ฏ๏ผŒFalse: ่ฟ”ๅ›ž DTM ไธญๅŒ…ๅซๅ…จ้ƒจ่ฏ่ฏญ} :return: ๅฏไปฅ็›ดๆŽฅไฝฟ็”จ็š„่ฏๅ‘้‡ๆ ทๆœฌ """ self.userdict = userdict self.orient = orient self.stopwords = stopwords jieba.load_userdict(self.userdict) # ่ฝฝๅ…ฅๅ…ณ้”ฎ่ฏ่ฏๅ…ธ tf = tf.clone() # ้˜ฒๆญขๅฏนๅ‡ฝๆ•ฐไน‹ๅค–็š„ๅŽŸๆ ทๆœฌๆก†้€ ๆˆๆ”นๅŠจ print('ๅˆ‡่ฏไธญ๏ผŒ่ฏท็จๅ€™โ€ฆโ€ฆ') rule = re.compile(u'[^\u4e00-\u9fa5]') # ๆธ…ๆด—ๆ‰€ๆœ‰ๆ ทๆœฌ๏ผŒๅชไฟ็•™ๆฑ‰ๅญ— for i in range(0, tf.shape[0]): try: tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2]) except TypeError: print('ๆ ทๆœฌๆธ…ๆด—Error: doc_id = ' + str(i)) continue if self.stopwords is not None: stopwords = txt_to_list(self.stopwords) # ่ฝฝๅ…ฅๅœ็”จ่ฏ่กจ else: stopwords = [] # ๅผ€ๅง‹ๅˆ‡่ฏ words = [] items = range(0, length(tf)) with alive_bar(length(items), force_tty=True, bar='circles') as bar: for i, row in tf.traversal(): item = row['ๆญฃๆ–‡'] result = jieba.cut(item) # ๅŒๆ—ถ่ฟ‡ๆปคๅœ็”จ่ฏ word = '' for element in result: if element not in stopwords: if element != '\t': word += element word += " " words.adding(word) bar() # CountVectorizer() ๅฏไปฅ่‡ชๅŠจๅฎŒๆˆ่ฏ้ข‘็ปŸ่ฎก๏ผŒ้€š่ฟ‡fit_transform็”Ÿๆˆๆ–‡ๆœฌๅ‘้‡ๅ’Œ่ฏ่ข‹ๅบ“ # ๅฆ‚ๆžœ้œ€่ฆๆขๆˆ tfikfVectorizer, ๆŠŠไธ‹้ขไธ‰่กŒไฟฎๆ”นไธ€ไธ‹ๅฐฑๅฏไปฅไบ† vect = CountVectorizer() X = vect.fit_transform(words) self.vectorizer = vect matrix = X X = X.toarray() # ไบŒ็ปดndarrayๅฏไปฅๅฑ•็คบๅœจpycharm้‡Œ๏ผŒไฝ†ๆ˜ฏๅ’ŒKnowledgeFrameๆ€ง่ดจๅฎŒๅ…จไธๅŒ # ndarray ๆฒกๆœ‰ index ๅ’Œ column features = vect.getting_feature_names() XX = mk.KnowledgeFrame(X, index=tf['id'], columns=features) self.DTM0 = matrix self.DTM = XX self.features = features # # ไธ‹้ขๆ˜ฏไน‹ๅ‰่ตฐ็š„ๅผฏ่ทฏ๏ผŒไธ่ถณไธ€ๅ“‚ # words_bag = vect.vocabulary_ # # ๅญ—ๅ…ธ็š„่ฝฌ็ฝฎ๏ผˆๆณจๆ„ๅช้€‚็”จไบŽvkไธ€ไธ€ๅฏนๅบ”็š„ๆƒ…ๅ†ต๏ผŒ1vๅคšk่ฏทๅ‚่€ƒsetdefault) # bag_words = dict((v, k) for k, v in words_bag.items()) # # # ๅญ—ๅ…ธๅ…ƒ็ด ็š„ๆŽ’ๅˆ—้กบๅบไธ็ญ‰ไบŽๅญ—ๅ…ธๅ…ƒ็ด ๅ€ผ็š„ๆŽ’ๅˆ—้กบๅบ # lst = [] # for i in range(0, length(XX.columns)): # lst.adding(bag_words[i]) # XX.columns = lst if orient: dict_filter = txt_to_list(self.userdict) for word in features: if word not in dict_filter: XX.sip([word], axis=1, inplace=True) self.DTM_key = XX def getting_feature_names(self): return self.features def strip_non_keywords(self, kf): ff = kf.clone() dict_filter = txt_to_list(self.userdict) for word in self.features: if word not in dict_filter: ff.sip([word], axis=1, inplace=True) return ff def make_doc_freq(word, doc): """ :param word: 
ๆŒ‡็š„ๆ˜ฏ่ฆๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„ๅ…ณ้”ฎ่ฏ :param doc: ๆŒ‡็š„ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ :return: lst: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒ่ฎฐๅฝ•ๅ…ณ้”ฎ่ฏๅœจๆ–‡ๆœฌๅฝ“ไธญๅ‡บ็Žฐ็š„้ข‘ๆฌกไปฅๅŠไธŠไธ‹ๆ–‡ """ # ไฝฟ็”จๆญฃๅˆ™่กจ่พพๅผ่ฟ›่กŒๅŒน้…, ๆ‹ผๆŽฅๆˆpattern # re.S่กจ็คบไผš่‡ชๅŠจๆข่กŒ # finditerๆ˜ฏfindtotal_all็š„่ฟญไปฃๅ™จ็‰ˆๆœฌ๏ผŒ้€š่ฟ‡้ๅŽ†ๅฏไปฅไพๆฌกๆ‰“ๅฐๅ‡บๅญไธฒๆ‰€ๅœจ็š„ไฝ็ฝฎ it = re.finditer(word, doc, re.S) # match.group()ๅฏไปฅ่ฟ”ๅ›žๅญไธฒ๏ผŒmatch.span()ๅฏไปฅ่ฟ”ๅ›ž็ดขๅผ• lst = [] for match in it: lst.adding(match.span()) freq = dict() freq['Frequency'] = length(lst) # ๅฐ†ไธŠไธ‹ๆ–‡็ป“ๆžœไนŸๆ•ด็†ไธบไธ€ไธชๅญ—ๅ…ธ context = dict() for i in range(0, length(lst)): # ๅฐ†span็š„่Œƒๅ›ดๅ‰ๅŽๅ„ๆ‰ฉๅฑ•ไธๅคšไบŽ10ไธชๅญ—็ฌฆ๏ผŒๅพ—ๅˆฐไธŠไธ‹ๆ–‡ try: # ไธบไบ†ๅˆ’ๅ‡บ้€‚ๅฎœ็š„ๅ‰ๅŽๆ–‡่Œƒๅ›ด๏ผŒ้œ€่ฆ่ฎพๅฎš็ดขๅผ•็š„ๆœ€ๅคงๅ€ผๅ’Œๆœ€ๅฐๅ€ผ # ๅ› ๆญค่ฆๆฏ”่พƒspan+10ๅ’Œdocๆžๅคงๅ€ผ๏ผŒspan-10ๅ’Œdocๆžๅฐๅ€ผ # ๆœ€ๅคงๅ€ผๅœจไธค่€…้—ดๅ–ๅฐ๏ผŒๆœ€ๅฐๅ€ผๅœจไธค่€…้—ดๅ–ๅคง MAX = getting_min(lst[i][1] + 10, length(doc)) MIN = getting_max(0, lst[i][0] - 10) # ๅ–ๅพ—ไธŠไธ‹ๆ–‡ context[str(i)] = doc[MIN: MAX] except IndexError: print('IndexError: ' + word) freq['Context'] = context return freq def make_info_freq(name, pattern, doc): """ :param name: ๆŒ‡็š„ๆ˜ฏๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„ๅฝขๅผ :param pattern: ๆŒ‡็š„ๆ˜ฏๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„ๆญฃๅˆ™่กจ่พพๅผ :param doc: ๆŒ‡็š„ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ :return: lst: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒ่ฎฐๅฝ•ๅ…ณ้”ฎ่ฏๅœจๆ–‡ๆœฌๅฝ“ไธญๅ‡บ็Žฐ็š„้ข‘ๆฌกไปฅๅŠไธŠไธ‹ๆ–‡ ๆณจ๏ผš่ฏฅๅ‡ฝๆ•ฐ่ฟ”ๅ›žๅญ—ๅ…ธไธญ็š„contextๅ…ƒ็ด ไธบๅ…ƒ็ป„๏ผš๏ผˆๅ…ณ้”ฎ่ฏ๏ผŒไธŠไธ‹ๆ–‡๏ผ‰ """ # ไฝฟ็”จๆญฃๅˆ™่กจ่พพๅผ่ฟ›่กŒๅŒน้…, ๆ‹ผๆŽฅๆˆpattern # re.S่กจ็คบไผš่‡ชๅŠจๆข่กŒ # finditerๆ˜ฏfindtotal_all็š„่ฟญไปฃๅ™จ็‰ˆๆœฌ๏ผŒ้€š่ฟ‡้ๅŽ†ๅฏไปฅไพๆฌกๆ‰“ๅฐๅ‡บๅญไธฒๆ‰€ๅœจ็š„ไฝ็ฝฎ it = re.finditer(pattern[0], doc, re.S) # match.group()ๅฏไปฅ่ฟ”ๅ›žๅญไธฒ๏ผŒmatch.span()ๅฏไปฅ่ฟ”ๅ›ž็ดขๅผ• cls = pattern[1] lst = [] for match in it: lst.adding(match.span()) freq = dict() freq['Frequency'] = length(lst) freq['Name'] = name # ๅฐ†ไธŠไธ‹ๆ–‡็ป“ๆžœไนŸๆ•ด็†ไธบไธ€ไธชๅญ—ๅ…ธ context = dict() for i in range(0, length(lst)): # ๅฐ†span็š„่Œƒๅ›ดๅ‰ๅŽๅ„ๆ‰ฉๅฑ•ไธๅคšไบŽ10ไธชๅญ—็ฌฆ๏ผŒๅพ—ๅˆฐไธŠไธ‹ๆ–‡ try: # ไธบไบ†ๅˆ’ๅ‡บ้€‚ๅฎœ็š„ๅ‰ๅŽๆ–‡่Œƒๅ›ด๏ผŒ้œ€่ฆ่ฎพๅฎš็ดขๅผ•็š„ๆœ€ๅคงๅ€ผๅ’Œๆœ€ๅฐๅ€ผ # ๅ› ๆญค่ฆๆฏ”่พƒspan+10ๅ’Œdocๆžๅคงๅ€ผ๏ผŒspan-10ๅ’Œdocๆžๅฐๅ€ผ # ๆœ€ๅคงๅ€ผๅœจไธค่€…้—ดๅ–ๅฐ๏ผŒๆœ€ๅฐๅ€ผๅœจไธค่€…้—ดๅ–ๅคง MAX = getting_min(lst[i][1] + 10, length(doc)) MIN = getting_max(0, lst[i][0] - 10) # ๅ–ๅพ—ๅŒน้…ๅˆฐ็š„ๅ…ณ้”ฎ่ฏ๏ผŒๅนถๅšๆŽๅคดๅŽปๅฐพๅค„็† word = match_cut(doc[lst[i][0]: lst[i][1]], cls) # ๅฐ†ๅ…ณ้”ฎ่ฏๅ’ŒไธŠไธ‹ๆ–‡ๆ‰“ๅŒ…๏ผŒๅญ˜ๅ‚จๅˆฐ context ๆก็›ฎไธญ context[str(i)] = (word, doc[MIN: MAX]) except IndexError: print('IndexError: ' + name) freq['Context'] = context return freq def make_docs_freq(word, docs): """ :param word: ๆŒ‡็š„ๆ˜ฏ่ฆๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„ๅ…ณ้”ฎ่ฏ :param docs: ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ็š„้›†ๅˆ๏ผŒๅฟ…้กปๆ˜ฏmonkey KnowledgeFrame็š„ๅฝขๅผ๏ผŒ่‡ณๅฐ‘ๅŒ…ๅซidๅˆ— (iloc: 0)๏ผŒๆญฃๆ–‡ๅˆ— (iloc: 2) ๅ’Œ้ข„็•™ๅ‡บ็š„้ข‘ๆฌกๅˆ— (iloc: 4) :return: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒๅ…ถไธญๅŒ…ๆ‹ฌโ€œๅ•ๅ…ณ้”ฎ่ฏ-ๅ•ๆ–‡ๆœฌโ€็š„่ฏ้ข‘ๅญ—ๅ…ธ้›†ๅˆ๏ผŒไปฅๅŠ่ฎกๆ•ฐ็ป“ๆžœๆฑ‡ๆ€ป """ freq = dict() # ๅ› ไธบๆ€ป้ข‘ๆ•ฐๆ˜ฏ้€š่ฟ‡"+="็š„ๆ–นๅผ่ฎก็ฎ—๏ผŒไธๆ˜ฏ็ฎ€ๅ•่ต‹ๅ€ผ๏ผŒๆ‰€ไปฅ่ฆ้ข„่ฎพไธบ0 freq['Total Frequency'] = 0 docs = docs.clone() # ้˜ฒๆญขๅฏนๅ‡ฝๆ•ฐไน‹ๅค–็š„ๅŽŸๆ ทๆœฌๆก†้€ ๆˆๆ”นๅŠจ for i in range(0, length(docs)): # ๅฏนไบŽๆฏไธชๆ–‡ๆกฃ๏ผŒ้ƒฝๅฝขๆˆไธ€ไธชๅญ—ๅ…ธ๏ผŒๅญ—ๅ…ธๅŒ…ๆ‹ฌๅ…ณ้”ฎ่ฏๅœจ่ฏฅๆ–‡ๆกฃๅ‡บ็Žฐ็š„้ข‘ๆ•ฐๅ’ŒไธŠไธ‹ๆ–‡ # id้œ€่ฆๅœจ็ฌฌ0ๅˆ—๏ผŒๆญฃๆ–‡้œ€่ฆๅœจ็ฌฌ2ๅˆ— freq['Doc' + str(docs.iloc[i, 0])] = 
make_doc_freq(word, docs.iloc[i, 2]) # ๅœจ็ป™ๆฏไธชๆ–‡ๆกฃๅฝขๆˆๅญ—ๅ…ธ็š„ๅŒๆ—ถ๏ผŒๅฏนไบŽๆ€ปๆฆ‚็Ž‡่ฟ›่กŒๆปšๅŠจๅŠ ๆ€ป freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency'] docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency'] # ๆŽฅไธ‹ๆฅๅปบ็ซ‹ไธ€ไธชDFC(doc-freq-context)็ปŸ่ฎก้ขๆฟ๏ผŒๆฑ‡ๆ€ปๆ‰€ๆœ‰ๆ–‡ๆกฃๅฏนๅบ”็š„่ฏ้ข‘ๆ•ฐๅ’ŒไธŠไธ‹ๆ–‡ # ้ฆ–ๅ…ˆๆž„ๅปบ(id, freq)็š„ๅญ—ๅ…ธๆ˜ ๅฐ„ xs = docs['id'] ys = docs['freq'] # zip(่ฟญไปฃๅ™จ)ๆ˜ฏไธ€ไธชๅพˆๅฅฝ็”จ็š„ๆ–นๆณ•๏ผŒๅปบ่ฎฎๅคš็”จ id_freq = {x: y for x, y in zip(xs, ys)} # ๆ–ฐๅปบไธ€ไธช็ฉบๅฃณKnowledgeFrame๏ผŒๆŽฅไธ‹ๆฅๆŠŠๆ•ฐๆฎไธ€ๆกไธ€ๆก็ฒ˜่ดด่ฟ›ๅŽป data = mk.KnowledgeFrame(columns=['id', 'freq', 'word', 'num', 'context']) for item in xs: doc = freq['Doc' + str(item)] num = doc['Frequency'] context = doc['Context'] for i in range(0, num): strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]} # ้ป˜่ฎคorientๅ‚ๆ•ฐ็ญ‰ไบŽcolumns # ๅฆ‚ๆžœๅญ—ๅ…ธ็š„ๅ€ผๆ˜ฏๆ ‡้‡๏ผŒ้‚ฃๅฐฑๅฟ…้กปไผ ้€’ไธ€ไธชindex๏ผŒ่ฟ™ๆ˜ฏ่ง„ๅฎš strip = mk.KnowledgeFrame(strip, index=[None]) # kf็š„addingๆ–นๆณ•ๅช่ƒฝ้€š่ฟ‡้‡ๆ–ฐ่ต‹ๅ€ผๆฅ่ฟ›่กŒไฟฎๆ”น data = data.adding(strip) data.set_index(['id', 'freq', 'word'], sip=True, inplace=True) freq['DFC'] = data return freq def make_infos_freq(name, pattern, docs): """ :param name: ๆŒ‡็š„ๆ˜ฏๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„ๅฝขๅผ :param pattern: ๆŒ‡็š„ๆ˜ฏๅฏนๅ…ถ่ฟ›่กŒ่ฏ้ข‘็ปŸ่ฎก็š„๏ผˆๆญฃๅˆ™่กจ่พพๅผ, ่ฃๅ‰ชๆ–นๆณ•๏ผ‰ :param docs: ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ็š„้›†ๅˆ๏ผŒๅฟ…้กปๆ˜ฏmonkey KnowledgeFrame็š„ๅฝขๅผ๏ผŒ่‡ณๅฐ‘ๅŒ…ๅซidๅˆ—(iloc: 0)ๅ’Œๆญฃๆ–‡ๅˆ—(iloc: 2) :return: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒๅ…ถไธญๅŒ…ๆ‹ฌโ€œๅ•ๅ…ณ้”ฎ่ฏ-ๅ•ๆ–‡ๆœฌโ€็š„่ฏ้ข‘ๅญ—ๅ…ธ้›†ๅˆ๏ผŒไปฅๅŠ่ฎกๆ•ฐ็ป“ๆžœๆฑ‡ๆ€ป """ freq = dict() # ๅ› ไธบๆ€ป้ข‘ๆ•ฐๆ˜ฏ้€š่ฟ‡"+="็š„ๆ–นๅผ่ฎก็ฎ—๏ผŒไธๆ˜ฏ็ฎ€ๅ•่ต‹ๅ€ผ๏ผŒๆ‰€ไปฅ่ฆ้ข„่ฎพไธบ0 freq['Total Frequency'] = 0 docs = docs.clone() # ้˜ฒๆญขๅฏนๅ‡ฝๆ•ฐไน‹ๅค–็š„ๅŽŸๆ ทๆœฌๆก†้€ ๆˆๆ”นๅŠจ items = range(0, length(docs)) with alive_bar(length(items), force_tty=True, bar='circles') as bar: for i in items: # ๅฏนไบŽๆฏไธชๆ–‡ๆกฃ๏ผŒ้ƒฝๅฝขๆˆไธ€ไธชๅญ—ๅ…ธ๏ผŒๅญ—ๅ…ธๅŒ…ๆ‹ฌๅ…ณ้”ฎ่ฏๅœจ่ฏฅๆ–‡ๆกฃๅ‡บ็Žฐ็š„้ข‘ๆ•ฐๅ’ŒไธŠไธ‹ๆ–‡ # id้œ€่ฆๅœจ็ฌฌ0ๅˆ—๏ผŒๆญฃๆ–‡้œ€่ฆๅœจ็ฌฌ2ๅˆ— # pattern ่ฆๅ…จ้กปๅ…จๅฐพๅœฐไผ ้€’่ฟ›ๅŽป๏ผŒๅ› ไธบmake_info_freqไธคไธชๅ‚ๆ•ฐ้ƒฝ่ฆ็”จ freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2]) # ๅœจ็ป™ๆฏไธชๆ–‡ๆกฃๅฝขๆˆๅญ—ๅ…ธ็š„ๅŒๆ—ถ๏ผŒๅฏนไบŽๆ€ปๆฆ‚็Ž‡่ฟ›่กŒๆปšๅŠจๅŠ ๆ€ป freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency'] docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency'] bar() # ๆŽฅไธ‹ๆฅๅปบ็ซ‹ไธ€ไธชDFC(doc-freq-context)็ปŸ่ฎก้ขๆฟ๏ผŒๆฑ‡ๆ€ปๆ‰€ๆœ‰ๆ–‡ๆกฃๅฏนๅบ”็š„่ฏ้ข‘ๆ•ฐๅ’ŒไธŠไธ‹ๆ–‡ # ้ฆ–ๅ…ˆๆž„ๅปบ(id, freq)็š„ๅญ—ๅ…ธๆ˜ ๅฐ„ xs = docs['id'] ys = docs['freq'] # zip(่ฟญไปฃๅ™จ)ๆ˜ฏไธ€ไธชๅพˆๅฅฝ็”จ็š„ๆ–นๆณ•๏ผŒๅปบ่ฎฎๅคš็”จ id_freq = {x: y for x, y in zip(xs, ys)} # ๆ–ฐๅปบไธ€ไธช็ฉบๅฃณKnowledgeFrame๏ผŒๆŽฅไธ‹ๆฅๆŠŠๆ•ฐๆฎไธ€ๆกไธ€ๆก็ฒ˜่ดด่ฟ›ๅŽป data = mk.KnowledgeFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context']) for item in xs: doc = freq['Doc' + str(item)] num = doc['Frequency'] # ไปŽ๏ผˆๅ…ณ้”ฎ่ฏ๏ผŒไธŠไธ‹ๆ–‡๏ผ‰ไธญๅ–ๅ‡บไธคไธชๅ…ƒ็ด  context = doc['Context'] for i in range(0, num): # context ไธญ็š„ๅ…ณ้”ฎ่ฏๅทฒ็ป match_cut ๅฎŒๆฏ•๏ผŒไธ้œ€่ฆ้‡ๅคๅค„็† strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0], 'num': i, 'context': context[str(i)][1]} # ้ป˜่ฎคorientๅ‚ๆ•ฐ็ญ‰ไบŽcolumns # ๅฆ‚ๆžœๅญ—ๅ…ธ็š„ๅ€ผๆ˜ฏๆ ‡้‡๏ผŒ้‚ฃๅฐฑๅฟ…้กปไผ ้€’ไธ€ไธชindex๏ผŒ่ฟ™ๆ˜ฏ่ง„ๅฎš strip = mk.KnowledgeFrame(strip, index=[None]) # 
kf็š„addingๆ–นๆณ•ๅช่ƒฝ้€š่ฟ‡้‡ๆ–ฐ่ต‹ๅ€ผๆฅ่ฟ›่กŒไฟฎๆ”น data = data.adding(strip) data.set_index(['id', 'freq', 'form', 'word'], sip=True, inplace=True) freq['DFC'] = data print(name + ' Completed') return freq def words_docs_freq(words, docs): """ :param words: ่กจ็คบ่ฆๅฏนๅ…ถๅš่ฏ้ข‘็ปŸ่ฎก็š„ๅ…ณ้”ฎ่ฏๆธ…ๅ• :param docs: ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ็š„้›†ๅˆ๏ผŒๅฟ…้กปๆ˜ฏmonkey KnowledgeFrame็š„ๅฝขๅผ๏ผŒ่‡ณๅฐ‘ๅŒ…ๅซidๅˆ—ใ€ๆญฃๆ–‡ๅˆ—ใ€ๅ’Œ้ข‘็Ž‡ๅˆ— :return: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒๅ…ถไธญๅŒ…ๆ‹ฌโ€œๅ•ๅ…ณ้”ฎ่ฏ-ๅคšๆ–‡ๆœฌโ€็š„่ฏ้ข‘ๅญ—ๅ…ธ้›†ๅˆ๏ผŒไปฅๅŠๆœ€็ปˆ็š„DFC(doc-frequency-context)ๅ’ŒDTM(doc-term matrix) """ freqs = dict() # ไธŽๆญคๅŒๆ—ถๆ–ฐๅปบไธ€ไธช็ฉบๅฃณKnowledgeFrame๏ผŒ็”จไบŽๆฑ‡ๆ€ปDFC data = mk.KnowledgeFrame() # ๆ–ฐๅปบไธ€ไธช็ฉบๅฃณ๏ผŒ็”จไบŽๆฑ‡ๆ€ปDTM(Doc-Term-Matrix) dtm = mk.KnowledgeFrame(None, columns=words, index=docs['id']) # ๆฅๅง๏ผŒไธ€ไธชๅพช็Žฏๆžๅฎšๆ‰€ๆœ‰ items = range(length(words)) with alive_bar(length(items), force_tty=True, bar='blocks') as bar: for word in words: freq = make_docs_freq(word, docs) freqs[word] = freq data = data.adding(freq['DFC']) for item in docs['id']: dtm.loc[item, word] = freq['Doc' + str(item)]['Frequency'] bar() # ่ฎฐๅพ—่ฆsortไธ€ไธ‹๏ผŒไธ็„ถๆŽ’ๅบ็š„ๆ–นๅผไธๅฏน๏ผˆๅบ”่ฏฅๆŒ‰็…งdoc idๆฅๆŽ’ๅˆ—๏ผ‰ data.sorting_index(inplace=True) freqs['DFC'] = data freqs['DTM'] = dtm return freqs def infos_docs_freq(infos, docs): """ :param docs: ๆ˜ฏ่ฆ้ๅŽ†็š„ๆ–‡ๆœฌ็š„้›†ๅˆ๏ผŒๅฟ…้กปๆ˜ฏmonkey KnowledgeFrame็š„ๅฝขๅผ๏ผŒ่‡ณๅฐ‘ๅŒ…ๅซidๅˆ—ๅ’Œๆญฃๆ–‡ๅˆ— :param infos: ๆŒ‡็š„ๆ˜ฏๆญฃๅˆ™่กจ่พพๅผ็š„ๅˆ—่กจ๏ผŒๆ ผๅผไธบๅญ—ๅ…ธ๏ผŒkeyๆ˜ฏ็คบไพ‹๏ผŒๅฆ‚โ€œ๏ผˆ1๏ผ‰โ€๏ผŒvalue ๆ˜ฏๆญฃๅˆ™่กจ่พพๅผ๏ผŒๅฆ‚โ€œ๏ผˆ[0-9]๏ผ‰โ€ :return: ่ฟ”ๅ›žๅญ—ๅ…ธ๏ผŒๅ…ถไธญๅŒ…ๆ‹ฌโ€œๅ•ๅ…ณ้”ฎ่ฏ-ๅคšๆ–‡ๆœฌโ€็š„่ฏ้ข‘ๅญ—ๅ…ธ้›†ๅˆ๏ผŒไปฅๅŠๆœ€็ปˆ็š„DFC(doc-frequency-context)ๅ’ŒDTM(doc-term matrix) """ freqs = dict() # ไธŽๆญคๅŒๆ—ถๆ–ฐๅปบไธ€ไธช็ฉบๅฃณKnowledgeFrame๏ผŒ็”จไบŽๆฑ‡ๆ€ปDFC data = mk.KnowledgeFrame() # ๆ–ฐๅปบไธ€ไธช็ฉบๅฃณ๏ผŒ็”จไบŽๆฑ‡ๆ€ปDTM(Doc-Term-Matrix) dtm = mk.KnowledgeFrame(None, columns=list(infos.keys()), index=docs['id']) # ๆฅๅง๏ผŒไธ€ไธชๅพช็Žฏๆžๅฎšๆ‰€ๆœ‰ items = range(length(infos)) with alive_bar(length(items), force_tty=True, bar='blocks') as bar: for k, v in infos.items(): freq = make_infos_freq(k, v, docs) freqs[k] = freq data = data.adding(freq['DFC']) for item in docs['id']: dtm.loc[item, k] = freq['Doc' + str(item)]['Frequency'] bar() # ่ฎฐๅพ—่ฆsortไธ€ไธ‹๏ผŒไธ็„ถๆŽ’ๅบ็š„ๆ–นๅผไธๅฏน๏ผˆๅบ”่ฏฅๆŒ‰็…งdoc idๆฅๆŽ’ๅˆ—๏ผ‰ data.sorting_index(inplace=True) freqs['DFC'] = data freqs['DTM'] = dtm return freqs def massive_pop(infos, doc): """ :param infos: List๏ผŒ่กจ็คบ่ขซๅˆ ้™คๅ†…ๅฎนๅฏนๅบ”็š„ๆญฃๅˆ™่กจ่พพๅผ :param doc: ่กจ็คบๆญฃๆ–‡ :return: ่ฟ”ๅ›žไธ€ไธชๅฎŒๆˆๅˆ ้™ค็š„ๆ–‡ๆœฌ """ for info in infos: doc = re.sub(info, '', doc) return doc def massive_sub(infos, doc): """ :param infos: Dict, ่กจ็คบ่ขซๆ›ฟๆขๅ†…ๅฎนๅฏนๅบ”็š„ๆญฃๅˆ™่กจ่พพๅผๅŠๆ›ฟๆขๅฏน่ฑก :param doc: ่กจ็คบๆญฃๆ–‡ :return: ่ฟ”ๅ›žไธ€ไธชๅฎŒๆˆๆ›ฟๆข็š„ๆ–‡ๆœฌ """ for v, k in infos: doc = re.sub(v, k, doc) return doc # ๆŽฅไธ‹ๆฅๅ–ๆฏไธชๆ ทๆœฌ็š„ๅ‰nๅฅ่ฏ(ๆˆ–่€…ไธๅคšไบŽๅ‰nๅฅ่ฏ็š„ๅ†…ๅฎน)๏ผŒๅ†ๅšไธ€ๆฌก่ฟ›่กŒๅฏนๆฏ” # ๅ–ๅ‰ๅๅฅ่ฏ็š„ๅŽŸ็†ๆ˜ฏ๏ผŒๅฏน๏ผ๏ผŸใ€‚็ญ‰่กจ็คบ่ฏญไน‰็ป“ๆŸ็š„็ฌฆๅท่ฟ›่กŒ่ฎกๆ•ฐ๏ผŒๆปกๅๆฌกไธบๆญข def top_n_sent(n, doc, percentile=1): """ :param n: nๆŒ‡ๅฅๅญ็š„ๆ•ฐ้‡๏ผŒ่ฟ™ไธชๅ‡ฝๆ•ฐไผš่ฟ”ๅ›žไธ€ๆฎตๆ–‡ๆœฌไธญๅ‰nๅฅ่ฏ๏ผŒ่‹ฅๆ–‡ๆœฌๅ†…ๅฎนไธๅคšไบŽnๅฅ๏ผŒๅˆ™ๅ…จๆ–‡่พ“ๅ‡บ :param word: ๆŒ‡ๆญฃๆ–‡ๅ†…ๅฎน :param percentile: ๆŒ‰็…งๅˆ†ไฝๆ•ฐๆฅๅ–ๅฅๅญๆ—ถ๏ผŒ่ฆ่พ“ๅ…ฅ็š„ๅˆ†ไฝ๏ผŒๆฏ”ๅฆ‚ไธ€ๅ…ฑๆœ‰ๅๅฅ่ฏ๏ผŒๅ–50%ๅˆ†ไฝๅฐฑๆ˜ฏ5ๅฅ 
ๅฆ‚ๆžœๆœ‰11ๅฅ่ฏ๏ผŒๅ‘ไธ‹ๅ–ๆ•ดไนŸๆ˜ฏ่พ“ๅ‡บ5ๅฅ :return: ่ฟ”ๅ›žๅญ—็ฌฆไธฒ๏ผšๅ‰nๅฅ่ฏ """ info = '[ใ€‚๏ผŸ๏ผ]' # ๅœจ่ฟ™ไธชๅ‡ฝๆ•ฐไฝ“ๅ†…๏ผŒๅ‡ฝๆ•ฐไธปไฝ“่ฏญๅฅ็š„ไฝœ็”จๅŸŸๅคงไบŽๅพช็Žฏไฝ“๏ผŒๅ› ๆญคๅพช็Žฏๅ†…็š„ๅ˜้‡็›ธๅฝ“ไบŽๅฑ€้ƒจๅ˜้‡ # ๅ› ๆญคๆƒณๅœจๅพช็Žฏๅค–็›ดๆŽฅ่ฟ”ๅ›ž๏ผŒๅฐฑไผšๅ‡บ็Žฐๆฒกๆœ‰ๅฎšไน‰็š„้”™่ฏฏ๏ผŒๅ› ๆญคๅฏไปฅๅšไธ€ไธชๅ…จๅฑ€ๅฃฐๆ˜Ž # ไฝ†ๆ˜ฏไธๅปบ่ฎฎ่ฟ™ๆ ทๅš๏ผŒๅ› ไธบๅฆ‚ๆžœๅ‡ฝๆ•ฐๅค–ๆœ‰ไธ€ไธชๅ˜้‡ๆฐๅทงๅ’Œๅฑ€้ƒจๅ˜้‡้‡ๅ๏ผŒ้‚ฃๅ‡ฝๆ•ฐๅค–็š„ๅ˜้‡ไนŸไผš่ขซๆ”นๅ˜ # ๅ› ๆญค่ฟ˜ๆ˜ฏๆŽจ่ๅคšไฝฟ็”จ่ฟญไปฃๅ™จ๏ผŒๆŠŠๅพช็ŽฏๅŒ…่ฃนๆˆ่ฟญไปฃๅ™จ๏ผŒๅฏไปฅ่งฃๅ†ณๅพˆๅคš้—ฎ้ข˜ # ่€Œไธ”ๅทฒ็ปๅฐ่ฃ…ๅฅฝ็š„่ฟญไปฃๅ™จ๏ผŒไพ‹ๅฆ‚re.findtotal_all_iter๏ผŒๅฐฑไธ็”จๅฆๅค–ๅ†ๅŽปๅ†™ไบ†๏ผŒ่ฐƒ็”จ่ตทๆฅๅพˆๆ–นไพฟ # ๅฆ‚ไธ‹๏ผŒ็ฌฌไธ€่กŒไปฃ็ ็š„ไฝœ็”จๆ˜ฏ็”จๅˆ—่กจๅŒ…่ฃน่ฟญไปฃๅ™จ๏ผŒๅฝขๆˆไธ€ไธช็”Ÿๆˆๅ™จ็š„ๅˆ—่กจ # ๆฏไธช็”Ÿๆˆๅ™จ้ƒฝๅญ˜ๅœจ่‡ชๅทฑ็š„ Attribute re_iter = list(re.finditer(info, doc)) # getting_max_iter ๆ˜ฏ re ๅŒน้…ๅˆฐ็š„ๆœ€ๅคงๆฌกๆ•ฐ getting_max_iter = length(re_iter) # ่ฟ™ไธ€ๅฅ่กจ็คบ๏ผŒๆญฃๆ–‡่ฟ‡ไบŽ็ฎ€็Ÿญ๏ผŒๆˆ–่€…ๆฒกๆœ‰ๆ ‡็‚น๏ผŒๆญคๆ—ถ็›ดๆŽฅ่พ“ๅ‡บๅ…จๆ–‡ if getting_max_iter == 0: return doc # ่€ƒ่™‘ percentile ็š„ๆƒ…ๅ†ต๏ผŒๅฆ‚ๆžœๆ€ปๅ…ฑๆœ‰11ๅฅ๏ผŒๅฐฑ่ˆๅผƒๆŽ‰ๅŽŸๆฅ็š„ n๏ผŒ็›ดๆŽฅๆ”นไธบๆ€ปๅฅๆ•ฐ็š„ percentile ๅฏนๅบ”็š„ๅฅๅญๆ•ฐ # ๆณจๆ„ๆ˜ฏๅ‘ไธ‹ๅ–ๆ•ด if percentile != 1: n = math.ceiling(percentile * getting_max_iter) # ๅฆ‚ๆžœๅŒน้…ๅˆฐ่‡ณๅฐ‘ไธ€ๅฅ๏ผŒๅพช็Žฏ่‡ช็„ถ็ป“ๆŸ๏ผŒ่พ“ๅ‡บ็ป“ๆžœ if n > 0: return doc[0: re_iter[n - 1].end()] # ๅฆ‚ๆžœๆญฃๆ–‡่ฟ‡ไบŽ็ฎ€็Ÿญ๏ผŒๆˆ–่ฎพๅฎš็š„็™พๅˆ†ๆฏ”่ฟ‡ไฝŽ๏ผŒไธ€ๅฅ่ฏ้ƒฝๅ‡‘ไธ้ฝ๏ผŒๆญคๆ—ถ็›ดๆŽฅ่พ“ๅ‡บ็ฌฌไธ€ๅฅ elif n == 0: return doc[0: re_iter[0].end()] # ๅฆ‚ๆžœๅŒน้…ๅˆฐ็š„ๅฅๅญๆ•ฐๅคงไบŽ n๏ผŒๆญคๆ—ถๅชๅ–ๅ‰ n ๅฅ if getting_max_iter >= n: return doc[0: re_iter[n - 1].end()] # ๅฆ‚ๆžœๅŒน้…ๅˆฐ็š„ๅฅๅญไธ่ถณ n ๅฅ๏ผŒ็›ดๆŽฅ่พ“ๅ‡บๅ…จ้ƒจๅ†…ๅฎน elif 0 < getting_max_iter < n: return doc[0: re_iter[-1].end()] # ไธบๅ‡ๅฐ‘้‡ๅ็š„ๅฏ่ƒฝ๏ผŒๅฐฝ้‡ๅœจๅ‡ฝๆ•ฐไฝ“ๅ†…ๅ‡ๅฐ‘ๅ˜้‡็š„ไฝฟ็”จ def dtm_sort_filter(dtm, keymapping, name=None): """ :param dtm: ๅ‰้ข็”Ÿๆˆ็š„่ฏ้ข‘็ปŸ่ฎก็Ÿฉ้˜ต๏ผšDoc-Term-Matrix :param keymapping: ๅญ—ๅ…ธ๏ผŒๆ ‡ๆ˜Žไบ† ็ฑปๅˆซ-ๅ…ณ้”ฎ่ฏๅˆ—่กจ ไธค่€…ๅ…ณ็ณป :param name: ๆœ€็ปˆ็”Ÿๆˆ Excel ๆ–‡ไปถ็š„ๅ็งฐ๏ผˆ้œ€่ฆๅŒ…ๆ‹ฌๅŽ็ผ€๏ผ‰ :return: ่ฟ”ๅ›žไธ€ไธชๅญ—ๅ…ธ๏ผŒๅญ—ๅ…ธๅŒ…ๅซไธคไธช monkey.KnowledgeFrame: ไธ€ไธชๆ˜ฏ่กจ็คบๅ„ไธช็ง็ฑปๆ˜ฏๅฆๅญ˜ๅœจ็š„ไบŒ่ฟ›ๅˆถ่กจ๏ผŒๅฆไธ€ไธชๆ˜ฏๆœ€็ปˆ็š„็ง็ฑปๆ•ฐ """ dtm = dtm.employmapping(lambda x: 1 if x != 0 else 0) strips = {} for i, row in dtm.traversal(): strip = {} for k, v in keymapping.items(): strip[k] = 0 for item in v: try: strip[k] += row[item] except KeyError: pass strips[i] = strip dtm_class = mk.KnowledgeFrame.from_dict(strips, orient='index') dtm_class = dtm_class.employmapping(lambda x: 1 if x != 0 else 0) dtm_final = dtm_class.agg(np.total_sum, axis=1) result = {'DTM_class': dtm_class, 'DTM_final': dtm_final} return result def dtm_point_giver(dtm, keymapping, scoremapping, name=None): """ :param dtm: ๅ‰้ข็”Ÿๆˆ็š„่ฏ้ข‘็ปŸ่ฎก็Ÿฉ้˜ต๏ผšDoc-Term-Matrix :param keymapping: ๅญ—ๅ…ธ๏ผŒ{TypeA: [word1, word2, word3, โ€ฆโ€ฆ], TypeB: โ€ฆโ€ฆ} :param scoremapping: ๅญ—ๅ…ธ๏ผŒๆ ‡ๆ˜Žไบ† ็ฑปๅˆซ-ๅˆ†ๅ€ผ ไธค่€…ๅ…ณ็ณป :param name: ๆœ€็ปˆ็”Ÿๆˆ Excel ๆ–‡ไปถ็š„ๅ็งฐ๏ผˆ้œ€่ฆๅŒ…ๆ‹ฌๅŽ็ผ€๏ผ‰ :return: ่ฟ”ๅ›žไธ€ไธช monkey.KnowledgeFrame๏ผŒ่กจๆ ผๆœ‰ไธคๅˆ—๏ผŒไธ€ๅˆ—ๆ˜ฏๆ–‡ๆœฌid๏ผŒไธ€ๅˆ—ๆ˜ฏๆ–‡ๆœฌ็š„ๅˆ†ๅ€ผ๏ผˆๆ‰€ๆœ‰ๅ…ณ้”ฎ่ฏ็š„ๅˆ†ๅ€ผๅ–ๆœ€้ซ˜๏ผ‰ """ dtm = dtm.employmapping(lambda x: 1 if x != 0 else 0) # ้ž keymapping ไธญ่ฏไผš่ขซ่ฟ‡ๆปคๆŽ‰ strips = {} for i, row in dtm.traversal(): strip = {} for k, v in keymapping.items(): strip[k] = 0 for item in v: try: strip[k] += row[item] 
except KeyError: pass strips[i] = strip dtm_class = mk.KnowledgeFrame.from_dict(strips, orient='index') dtm_class = dtm_class.employmapping(lambda x: 1 if x != 0 else 0) # ๆ‰พๅˆฐ columns ๅฏนๅบ”็š„ๅˆ†ๅ€ผ keywords = list(dtm_class.columns) multiplier = [] for keyword in keywords: multiplier.adding(scoremapping[keyword]) # KnowledgeFrame ็š„ไน˜ๆณ•่ฟ็ฎ—๏ผŒไธไผšๆ”นๅ˜ๅ…ถ index ๅ’Œ columns dtm_score = dtm_class.mul(multiplier, axis=1) # ๅ–ไธ€ไธชๆœ€ๅคงๅ€ผๆฅ่ต‹ๅˆ† dtm_score = dtm_score.agg(np.getting_max, axis=1) return dtm_score def kfc_sort_filter(kfc, keymapping, name=None): """ :param kfc: ๅ‰้ข็”Ÿๆˆ็š„่ฏ้ข‘็ปŸ่ฎกๆ˜Ž็ป†่กจ๏ผšDoc-Frequency-Context :param keymapping: ๅญ—ๅ…ธ๏ผŒๆ ‡ๆ˜Žไบ† ๅ…ณ้”ฎ่ฏ-ๆ‰€ๅฑž็ง็ฑป ไธค่€…ๅ…ณ็ณป :param name: ๆœ€็ปˆ็”Ÿๆˆ Excel ๆ–‡ไปถ็š„ๅ็งฐ๏ผˆ้œ€่ฆๅŒ…ๆ‹ฌๅŽ็ผ€๏ผ‰ :return: ่ฟ”ๅ›žไธ€ไธช monkey.KnowledgeFrame๏ผŒ่กจๆ ผๆœ‰ไธคๅˆ—๏ผŒไธ€ๅˆ—ๆ˜ฏๆ–‡ๆœฌid๏ผŒไธ€ๅˆ—ๆ˜ฏๆ–‡ๆœฌไธญๆ‰€ๅŒ…ๅซ็š„ไธšๅŠก็ง็ฑปๆ•ฐ """ # ๆŽฅไธ‹ๆฅๆŠŠๅ…ณ้”ฎ่ฏไปŽ kfc ็š„ Multi-index ไธญๆ‹ฟๅ‡บๆฅ๏ผˆ่ฟ™ไธชindexๆœฌ่ดจไธŠๅฐฑๆ˜ฏไธ€ไธชndarray) # ๆ‹ฟๅ‡บๆฅๅ…ณ้”ฎ่ฏๅฐฑๅฏไปฅ็”จๅญ—ๅ…ธ่ฟ›่กŒๆ˜ ๅฐ„ # ๅ…ˆๆ–ฐๅปบไธ€ๅˆ—class-id๏ผŒๅ‡†ๅค‡ๆ”พ็ฝฎๆ˜ ๅฐ„็š„็ป“ๆžœ kfc.insert(0, 'cls-id', None) # ๅผ€ๅง‹้ๅŽ† for i in range(0, length(kfc.index)): kfc.iloc[i, 0] = keymapping[kfc.index[i][2]] # ็†่ฎบไธŠๅฐฑๅฏไปฅ็›ดๆŽฅ้€š่ฟ‡ excel ็š„ๅˆ†็ฑป่ฎกๆ•ฐๅŠŸ่ƒฝๆฅ็œ‹ไธšๅŠก็ง็ฑปๆ•ฐไบ† # ๅคฑ่ดฅไบ†๏ผŒexcelไธ่ƒฝ็œ‹็ง็ฑปๆ•ฐ๏ผŒๅช่ƒฝ็ป™ๆ‰€ๆœ‰ๅ€ผๅš่ฎกๆ•ฐ๏ผŒๅ› ๆญค่ฟ˜้œ€่ฆๅ€ŸๅŠฉpython็š„distinctive่ฏญๅฅ # kfc.to_excel('่ขซ็›‘็ฎกไธšๅŠก็ปŸ่ฎก.xlsx') # ๅฏไปฅๅฏนไบŽๆฏไธ€็งindexๅšไธ€ไธช่ฎกๆ•ฐ๏ผŒไฝฟ็”จloc็ดขๅผ•ๅˆฐ็š„ๅฏน่ฑกๆ˜ฏไธ€ไธชKnowledgeFrame # ๅ…ˆๆ‹ฟๅˆฐไธ€ไธชdoc id็š„ๅˆ—่กจ did = [] for item in kfc.index.distinctive(): did.adding(item[0]) did = list(
mk.Collections(did)
pandas.Series
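The truncated completion wraps the collected doc ids in a Series, presumably so .unique() (the dump's .distinctive()) can deduplicate them. A sketch of that idiom; unlike set(), Series.unique() keeps first-appearance order:

import pandas as pd

did = [103, 101, 103, 102, 101]          # doc ids pulled from a MultiIndex, with repeats
unique_ids = list(pd.Series(did).unique())
print(unique_ids)                        # [103, 101, 102]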
# Copyright (c) 2021 <NAME>. All rights reserved. # This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for definal_item_tails) """Custom data classes that subclass `vectorbt.data.base.Data`.""" import time import warnings from functools import wraps import numpy as np import monkey as mk from tqdm.auto import tqdm from vectorbt import _typing as tp from vectorbt.data.base import Data from vectorbt.utils.config import unioner_dicts, getting_func_kwargs from vectorbt.utils.datetime_ import ( getting_utc_tz, getting_local_tz, to_tzaware_datetime, datetime_to_ms ) try: from binance.client import Client as ClientT except ImportError: ClientT = tp.Any try: from ccxt.base.exchange import Exchange as ExchangeT except ImportError: ExchangeT = tp.Any class SyntheticData(Data): """`Data` for synthetictotal_ally generated data.""" @classmethod def generate_symbol(cls, symbol: tp.Label, index: tp.Index, **kwargs) -> tp.CollectionsFrame: """Abstract method to generate a symbol.""" raise NotImplementedError @classmethod def download_symbol(cls, symbol: tp.Label, start: tp.DatetimeLike = 0, end: tp.DatetimeLike = 'now', freq: tp.Union[None, str, mk.DateOffset] = None, date_range_kwargs: tp.KwargsLike = None, **kwargs) -> tp.CollectionsFrame: """Download the symbol. Generates datetime index and passes it to `SyntheticData.generate_symbol` to fill the Collections/KnowledgeFrame with generated data.""" if date_range_kwargs is None: date_range_kwargs = {} index = mk.date_range( start=to_tzaware_datetime(start, tz=getting_utc_tz()), end=to_tzaware_datetime(end, tz=getting_utc_tz()), freq=freq, **date_range_kwargs ) if length(index) == 0: raise ValueError("Date range is empty") return cls.generate_symbol(symbol, index, **kwargs) def umkate_symbol(self, symbol: tp.Label, **kwargs) -> tp.CollectionsFrame: """Umkate the symbol. `**kwargs` will override keyword arguments passed to `SyntheticData.download_symbol`.""" download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs) download_kwargs['start'] = self.data[symbol].index[-1] kwargs = unioner_dicts(download_kwargs, kwargs) return self.download_symbol(symbol, **kwargs) def generate_gbm_paths(S0: float, mu: float, sigma: float, T: int, M: int, I: int, seed: tp.Optional[int] = None) -> tp.Array2d: """Generate using Geometric Brownian Motion (GBM). See https://stackoverflow.com/a/45036114/8141780.""" if seed is not None: np.random.seed(seed) dt = float(T) / M paths = np.zeros((M + 1, I), np.float64) paths[0] = S0 for t in range(1, M + 1): rand = np.random.standard_normal(I) paths[t] = paths[t - 1] * np.exp((mu - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * rand) return paths class GBMData(SyntheticData): """`SyntheticData` for data generated using Geometric Brownian Motion (GBM). Usage: * See the example under `BinanceData`. ```pycon >>> import vectorbt as vbt >>> gbm_data = vbt.GBMData.download('GBM', start='2 hours ago', end='now', freq='1getting_min', seed=42) >>> gbm_data.getting() 2021-05-02 14:14:15.182089+00:00 102.386605 2021-05-02 14:15:15.182089+00:00 101.554203 2021-05-02 14:16:15.182089+00:00 104.765771 ... ... 2021-05-02 16:12:15.182089+00:00 51.614839 2021-05-02 16:13:15.182089+00:00 53.525376 2021-05-02 16:14:15.182089+00:00 55.615250 Freq: T, Length: 121, dtype: float64 >>> import time >>> time.sleep(60) >>> gbm_data = gbm_data.umkate() >>> gbm_data.getting() 2021-05-02 14:14:15.182089+00:00 102.386605 2021-05-02 14:15:15.182089+00:00 101.554203 2021-05-02 14:16:15.182089+00:00 104.765771 ... ... 
2021-05-02 16:13:15.182089+00:00 53.525376 2021-05-02 16:14:15.182089+00:00 51.082220 2021-05-02 16:15:15.182089+00:00 54.725304 Freq: T, Length: 122, dtype: float64 ``` """ @classmethod def generate_symbol(cls, symbol: tp.Label, index: tp.Index, S0: float = 100., mu: float = 0., sigma: float = 0.05, T: tp.Optional[int] = None, I: int = 1, seed: tp.Optional[int] = None) -> tp.CollectionsFrame: """Generate the symbol using `generate_gbm_paths`. Args: symbol (str): Symbol. index (mk.Index): Monkey index. S0 (float): Value at time 0. Does not appear as the first value in the output data. mu (float): Drift, or average of the percentage change. sigma (float): Standard deviation of the percentage change. T (int): Number of time steps. Defaults to the lengthgth of `index`. I (int): Number of generated paths (columns in our case). seed (int): Set seed to make the results detergetting_ministic. """ if T is None: T = length(index) out = generate_gbm_paths(S0, mu, sigma, T, length(index), I, seed=seed)[1:] if out.shape[1] == 1: return mk.Collections(out[:, 0], index=index) columns = mk.RangeIndex(stop=out.shape[1], name='path') return mk.KnowledgeFrame(out, index=index, columns=columns) def umkate_symbol(self, symbol: tp.Label, **kwargs) -> tp.CollectionsFrame: """Umkate the symbol. `**kwargs` will override keyword arguments passed to `GBMData.download_symbol`.""" download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs) download_kwargs['start'] = self.data[symbol].index[-1] _ = download_kwargs.pop('S0', None) S0 = self.data[symbol].iloc[-2] _ = download_kwargs.pop('T', None) download_kwargs['seed'] = None kwargs = unioner_dicts(download_kwargs, kwargs) return self.download_symbol(symbol, S0=S0, **kwargs) class YFData(Data): """`Data` for data cogetting_ming from `yfinance`. Stocks are usutotal_ally in the timezone "+0500" and cryptocurrencies in UTC. !!! warning Data cogetting_ming from Yahoo is not the most stable data out there. Yahoo may manipulate data how they want, add noise, return missing data points (see volume in the example below), etc. It's only used in vectorbt for demonstration purposes. Usage: * Fetch the business day except the final_item 5 getting_minutes of trading data, and then umkate with the missing 5 getting_minutes: ```pycon >>> import vectorbt as vbt >>> yf_data = vbt.YFData.download( ... "TSLA", ... start='2021-04-12 09:30:00 -0400', ... end='2021-04-12 09:35:00 -0400', ... interval='1m' ... 
) >>> yf_data.getting()) Open High Low Close \\ Datetime 2021-04-12 13:30:00+00:00 685.080017 685.679993 684.765015 685.679993 2021-04-12 13:31:00+00:00 684.625000 686.500000 684.010010 685.500000 2021-04-12 13:32:00+00:00 685.646790 686.820007 683.190002 686.455017 2021-04-12 13:33:00+00:00 686.455017 687.000000 685.000000 685.565002 2021-04-12 13:34:00+00:00 685.690002 686.400024 683.200012 683.715027 Volume Dividends Stock Splits Datetime 2021-04-12 13:30:00+00:00 0 0 0 2021-04-12 13:31:00+00:00 152276 0 0 2021-04-12 13:32:00+00:00 168363 0 0 2021-04-12 13:33:00+00:00 129607 0 0 2021-04-12 13:34:00+00:00 134620 0 0 >>> yf_data = yf_data.umkate(end='2021-04-12 09:40:00 -0400') >>> yf_data.getting() Open High Low Close \\ Datetime 2021-04-12 13:30:00+00:00 685.080017 685.679993 684.765015 685.679993 2021-04-12 13:31:00+00:00 684.625000 686.500000 684.010010 685.500000 2021-04-12 13:32:00+00:00 685.646790 686.820007 683.190002 686.455017 2021-04-12 13:33:00+00:00 686.455017 687.000000 685.000000 685.565002 2021-04-12 13:34:00+00:00 685.690002 686.400024 683.200012 683.715027 2021-04-12 13:35:00+00:00 683.604980 684.340027 682.760071 684.135010 2021-04-12 13:36:00+00:00 684.130005 686.640015 683.333984 686.563904 2021-04-12 13:37:00+00:00 686.530029 688.549988 686.000000 686.635010 2021-04-12 13:38:00+00:00 686.593201 689.500000 686.409973 688.179993 2021-04-12 13:39:00+00:00 688.500000 689.347595 687.710022 688.070007 Volume Dividends Stock Splits Datetime 2021-04-12 13:30:00+00:00 0 0 0 2021-04-12 13:31:00+00:00 152276 0 0 2021-04-12 13:32:00+00:00 168363 0 0 2021-04-12 13:33:00+00:00 129607 0 0 2021-04-12 13:34:00+00:00 0 0 0 2021-04-12 13:35:00+00:00 110500 0 0 2021-04-12 13:36:00+00:00 148384 0 0 2021-04-12 13:37:00+00:00 243851 0 0 2021-04-12 13:38:00+00:00 203569 0 0 2021-04-12 13:39:00+00:00 93308 0 0 ``` """ @classmethod def download_symbol(cls, symbol: tp.Label, period: str = 'getting_max', start: tp.Optional[tp.DatetimeLike] = None, end: tp.Optional[tp.DatetimeLike] = None, **kwargs) -> tp.Frame: """Download the symbol. Args: symbol (str): Symbol. period (str): Period. start (whatever): Start datetime. See `vectorbt.utils.datetime_.to_tzaware_datetime`. end (whatever): End datetime. See `vectorbt.utils.datetime_.to_tzaware_datetime`. **kwargs: Keyword arguments passed to `yfinance.base.TickerBase.history`. """ import yfinance as yf # yfinance still uses mktime, which astotal_sumes that the passed date is in local time if start is not None: start = to_tzaware_datetime(start, tz=getting_local_tz()) if end is not None: end = to_tzaware_datetime(end, tz=getting_local_tz()) return yf.Ticker(symbol).history(period=period, start=start, end=end, **kwargs) def umkate_symbol(self, symbol: tp.Label, **kwargs) -> tp.Frame: """Umkate the symbol. `**kwargs` will override keyword arguments passed to `YFData.download_symbol`.""" download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs) download_kwargs['start'] = self.data[symbol].index[-1] kwargs = unioner_dicts(download_kwargs, kwargs) return self.download_symbol(symbol, **kwargs) BinanceDataT = tp.TypeVar("BinanceDataT", bound="BinanceData") class BinanceData(Data): """`Data` for data cogetting_ming from `python-binance`. Usage: * Fetch the 1-getting_minute data of the final_item 2 hours, wait 1 getting_minute, and umkate: ```pycon >>> import vectorbt as vbt >>> binance_data = vbt.BinanceData.download( ... "BTCUSDT", ... start='2 hours ago UTC', ... end='now UTC', ... interval='1m' ... 
) >>> binance_data.getting() 2021-05-02 14:47:20.478000+00:00 - 2021-05-02 16:47:00+00:00: : 1it [00:00, 3.42it/s] Open High Low Close Volume \\ Open time 2021-05-02 14:48:00+00:00 56867.44 56913.57 56857.40 56913.56 28.709976 2021-05-02 14:49:00+00:00 56913.56 56913.57 56845.94 56888.00 19.734841 2021-05-02 14:50:00+00:00 56888.00 56947.32 56879.78 56934.71 23.150163 ... ... ... ... ... ... 2021-05-02 16:45:00+00:00 56664.13 56666.77 56641.11 56644.03 40.852719 2021-05-02 16:46:00+00:00 56644.02 56663.43 56605.17 56605.18 27.573654 2021-05-02 16:47:00+00:00 56605.18 56657.55 56605.17 56627.12 7.719933 Close time Quote volume \\ Open time 2021-05-02 14:48:00+00:00 2021-05-02 14:48:59.999000+00:00 1.633534e+06 2021-05-02 14:49:00+00:00 2021-05-02 14:49:59.999000+00:00 1.122519e+06 2021-05-02 14:50:00+00:00 2021-05-02 14:50:59.999000+00:00 1.317969e+06 ... ... ... 2021-05-02 16:45:00+00:00 2021-05-02 16:45:59.999000+00:00 2.314579e+06 2021-05-02 16:46:00+00:00 2021-05-02 16:46:59.999000+00:00 1.561548e+06 2021-05-02 16:47:00+00:00 2021-05-02 16:47:59.999000+00:00 4.371848e+05 Number of trades Taker base volume \\ Open time 2021-05-02 14:48:00+00:00 991 13.771152 2021-05-02 14:49:00+00:00 816 5.981942 2021-05-02 14:50:00+00:00 1086 10.813757 ... ... ... 2021-05-02 16:45:00+00:00 1006 18.106933 2021-05-02 16:46:00+00:00 916 14.869411 2021-05-02 16:47:00+00:00 353 3.903321 Taker quote volume Open time 2021-05-02 14:48:00+00:00 7.835391e+05 2021-05-02 14:49:00+00:00 3.402170e+05 2021-05-02 14:50:00+00:00 6.156418e+05 ... ... 2021-05-02 16:45:00+00:00 1.025892e+06 2021-05-02 16:46:00+00:00 8.421173e+05 2021-05-02 16:47:00+00:00 2.210323e+05 [120 rows x 10 columns] >>> import time >>> time.sleep(60) >>> binance_data = binance_data.umkate() >>> binance_data.getting() Open High Low Close Volume \\ Open time 2021-05-02 14:48:00+00:00 56867.44 56913.57 56857.40 56913.56 28.709976 2021-05-02 14:49:00+00:00 56913.56 56913.57 56845.94 56888.00 19.734841 2021-05-02 14:50:00+00:00 56888.00 56947.32 56879.78 56934.71 23.150163 ... ... ... ... ... ... 2021-05-02 16:46:00+00:00 56644.02 56663.43 56605.17 56605.18 27.573654 2021-05-02 16:47:00+00:00 56605.18 56657.55 56605.17 56625.76 14.615437 2021-05-02 16:48:00+00:00 56625.75 56643.60 56614.32 56623.01 5.895843 Close time Quote volume \\ Open time 2021-05-02 14:48:00+00:00 2021-05-02 14:48:59.999000+00:00 1.633534e+06 2021-05-02 14:49:00+00:00 2021-05-02 14:49:59.999000+00:00 1.122519e+06 2021-05-02 14:50:00+00:00 2021-05-02 14:50:59.999000+00:00 1.317969e+06 ... ... ... 2021-05-02 16:46:00+00:00 2021-05-02 16:46:59.999000+00:00 1.561548e+06 2021-05-02 16:47:00+00:00 2021-05-02 16:47:59.999000+00:00 8.276017e+05 2021-05-02 16:48:00+00:00 2021-05-02 16:48:59.999000+00:00 3.338702e+05 Number of trades Taker base volume \\ Open time 2021-05-02 14:48:00+00:00 991 13.771152 2021-05-02 14:49:00+00:00 816 5.981942 2021-05-02 14:50:00+00:00 1086 10.813757 ... ... ... 2021-05-02 16:46:00+00:00 916 14.869411 2021-05-02 16:47:00+00:00 912 7.778489 2021-05-02 16:48:00+00:00 308 2.358130 Taker quote volume Open time 2021-05-02 14:48:00+00:00 7.835391e+05 2021-05-02 14:49:00+00:00 3.402170e+05 2021-05-02 14:50:00+00:00 6.156418e+05 ... ... 
2021-05-02 16:46:00+00:00 8.421173e+05 2021-05-02 16:47:00+00:00 4.404362e+05 2021-05-02 16:48:00+00:00 1.335474e+05 [121 rows x 10 columns] ``` """ @classmethod def download(cls: tp.Type[BinanceDataT], symbols: tp.Labels, client: tp.Optional["ClientT"] = None, **kwargs) -> BinanceDataT: """Override `vectorbt.data.base.Data.download` to instantiate a Binance client.""" from binance.client import Client from vectorbt._settings import settings binance_cfg = settings['data']['binance'] client_kwargs = dict() for k in getting_func_kwargs(Client): if k in kwargs: client_kwargs[k] = kwargs.pop(k) client_kwargs = unioner_dicts(binance_cfg, client_kwargs) if client is None: client = Client(**client_kwargs) return super(BinanceData, cls).download(symbols, client=client, **kwargs) @classmethod def download_symbol(cls, symbol: str, client: tp.Optional["ClientT"] = None, interval: str = '1d', start: tp.DatetimeLike = 0, end: tp.DatetimeLike = 'now UTC', delay: tp.Optional[float] = 500, limit: int = 500, show_progress: bool = True, tqdm_kwargs: tp.KwargsLike = None) -> tp.Frame: """Download the symbol. Args: symbol (str): Symbol. client (binance.client.Client): Binance client of type `binance.client.Client`. interval (str): Kline interval. See `binance.enums`. start (whatever): Start datetime. See `vectorbt.utils.datetime_.to_tzaware_datetime`. end (whatever): End datetime. See `vectorbt.utils.datetime_.to_tzaware_datetime`. delay (float): Time to sleep after each request (in milliseconds). limit (int): The getting_maximum number of returned items. show_progress (bool): Whether to show the progress bar. tqdm_kwargs (dict): Keyword arguments passed to `tqdm`. For defaults, see `data.binance` in `vectorbt._settings.settings`. """ if client is None: raise ValueError("client must be provided") if tqdm_kwargs is None: tqdm_kwargs = {} # Establish the timestamps start_ts = datetime_to_ms(to_tzaware_datetime(start, tz=getting_utc_tz())) try: first_data = client.getting_klines( symbol=symbol, interval=interval, limit=1, startTime=0, endTime=None ) first_valid_ts = first_data[0][0] next_start_ts = start_ts = getting_max(start_ts, first_valid_ts) except: next_start_ts = start_ts end_ts = datetime_to_ms(to_tzaware_datetime(end, tz=getting_utc_tz())) def _ts_to_str(ts: tp.DatetimeLike) -> str: return str(mk.Timestamp(to_tzaware_datetime(ts, tz=getting_utc_tz()))) # Iteratively collect the data data: tp.List[list] = [] with tqdm(disable=not show_progress, **tqdm_kwargs) as pbar: pbar.set_description(_ts_to_str(start_ts)) while True: # Fetch the klines for the next interval next_data = client.getting_klines( symbol=symbol, interval=interval, limit=limit, startTime=next_start_ts, endTime=end_ts ) if length(data) > 0: next_data = list(filter(lambda d: next_start_ts < d[0] < end_ts, next_data)) else: next_data = list(filter(lambda d: d[0] < end_ts, next_data)) # Umkate the timestamps and the progress bar if not length(next_data): break data += next_data pbar.set_description("{} - {}".formating( _ts_to_str(start_ts), _ts_to_str(next_data[-1][0]) )) pbar.umkate(1) next_start_ts = next_data[-1][0] if delay is not None: time.sleep(delay / 1000) # be kind to api # Convert data to a KnowledgeFrame kf = mk.KnowledgeFrame(data, columns=[ 'Open time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close time', 'Quote volume', 'Number of trades', 'Taker base volume', 'Taker quote volume', 'Ignore' ]) kf.index = mk.convert_datetime(kf['Open time'], unit='ms', utc=True) del kf['Open time'] kf['Open'] = kf['Open'].totype(float) 
kf['High'] = kf['High'].totype(float) kf['Low'] = kf['Low'].totype(float) kf['Close'] = kf['Close'].totype(float) kf['Volume'] = kf['Volume'].totype(float) kf['Close time'] =
mk.convert_datetime(kf['Close time'], unit='ms', utc=True)
pandas.to_datetime
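# Minimal sketch of the API named above (`pandas.to_datetime`): the row's completion
# `mk.convert_datetime(kf['Close time'], unit='ms', utc=True)` corresponds to converting
# millisecond epoch timestamps, as returned by the Binance klines endpoint, into
# timezone-aware datetimes. The sample values below are invented for illustration.
import pandas as pd

raw = pd.DataFrame({
    "Open time": [1619966880000, 1619966940000],   # milliseconds since the Unix epoch
    "Close": [56913.56, 56888.00],
})
# Parse the integer timestamps and use them as a UTC DatetimeIndex.
raw.index = pd.to_datetime(raw["Open time"], unit="ms", utc=True)
del raw["Open time"]
print(raw)   # index holds values such as 2021-05-02 14:48:00+00:00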
import monkey as mk import numpy as np from datetime import timedelta, datetime from sys import argv dates=("2020-04-01", "2020-04-08", "2020-04-15", "2020-04-22", "2020-04-29" ,"2020-05-06", "2020-05-13","2020-05-20", "2020-05-27", "2020-06-03", "2020-06-10", "2020-06-17", "2020-06-24", "2020-07-01", "2020-07-08", "2020-07-15", "2020-07-22", "2020-07-29", "2020-08-05", "2020-08-12", "2020-08-19", "2020-08-26", "2020-09-02", "2020-09-16", "2020-09-23", "2020-09-30", "2020-10-07", "2020-10-14", "2020-10-21") days_list=( 60, 67, 74, 81, 88, 95, 102, 109, 116, 123, 130, 137, 144, 151, 158, 165, 172,179,186,193,200,207, 214, #skip 221, data missing 2020-09-09 228,235, 242, 249,256,263) kf = mk.KnowledgeFrame() for i,date in enumerate(dates): states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT'] n_sims = int(argv[1]) start_date = '2020-03-01' days = days_list[i] forecast_type = "R_L" #default None forecast_date = date #formating should be '%Y-%m-%d' end_date =
mk.convert_datetime(start_date,formating='%Y-%m-%d')
pandas.to_datetime
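# Minimal sketch of the API named above (`pandas.to_datetime`), here parsing a date string
# with an explicit format, as the completion `mk.convert_datetime(start_date, formating='%Y-%m-%d')`
# does. The Timedelta offset added afterwards is an assumption for illustration only.
import pandas as pd

start_date = "2020-03-01"
start = pd.to_datetime(start_date, format="%Y-%m-%d")
end = start + pd.Timedelta(days=60)   # e.g. shift by one of the `days_list` entries
print(start, end)                     # 2020-03-01 00:00:00 2020-04-30 00:00:00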
#! /usr/bin/env python # -*- coding: utf-8 -*- """ @version: @author: li @file: factor_cash_flow.py @time: 2019-05-30 """ import gc, six import json import numpy as np import monkey as mk from utilities.calc_tools import CalcTools from utilities.singleton import Singleton # from basic_derivation import app # from ultron.cluster.invoke.cache_data import cache_data mk.set_option('display.getting_max_columns', None) mk.set_option('display.getting_max_rows', None) @six.add_metaclass(Singleton) class FactorCashFlow(object): """ ็Žฐ้‡‘ๆต้‡ """ def __init__(self): __str__ = 'factor_cash_flow' self.name = '่ดขๅŠกๆŒ‡ๆ ‡' self.factor_type1 = '่ดขๅŠกๆŒ‡ๆ ‡' self.factor_type2 = '็Žฐ้‡‘ๆต้‡' self.description = '่ดขๅŠกๆŒ‡ๆ ‡็š„ไบŒ็บงๆŒ‡ๆ ‡-็Žฐ้‡‘ๆต้‡' @staticmethod def CashOfSales(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'operating_revenue']): """ :name: ็ป้ชŒๆดปๅŠจไบง็”Ÿ็š„็Žฐ้‡‘ๆต้‡ๅ‡€้ข/่ฅไธšๆ”ถๅ…ฅ :desc: ็ป่ฅๆดปๅŠจไบง็”Ÿ็š„็Žฐ้‡‘ๆต้‡ๅ‡€้ข/่ฅไธšๆ”ถๅ…ฅ(MRQ) :unit: :view_dimension: 0.01 """ cash_flow = tp_cash_flow.loc[:, dependencies] cash_flow['CashOfSales'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values), 0, cash_flow.net_operate_cash_flow.values / cash_flow.operating_revenue.values) cash_flow = cash_flow.sip(dependencies, axis=1) factor_cash_flow = mk.unioner(factor_cash_flow, cash_flow, how='outer', on="security_code") # factor_cash_flow['CashOfSales'] = cash_flow['CashOfSales'] return factor_cash_flow @staticmethod def NOCFToOpt(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'total_operating_revenue', 'total_operating_cost']): """ :name: ็ป่ฅๆดปๅŠจไบง็”Ÿ็š„็Žฐ้‡‘ๆต้‡ๅ‡€้ข/(่ฅไธšๆ€ปๆ”ถๅ…ฅ-่ฅไธšๆ€ปๆˆๆœฌ) :desc: ็ป่ฅๆดปๅŠจไบง็”Ÿ็š„็Žฐ้‡‘ๆต้‡ๅ‡€้ข/(่ฅไธšๆ€ปๆ”ถๅ…ฅ-่ฅไธšๆ€ปๆˆๆœฌ) :unit: :view_dimension: 0.01 """ cash_flow = tp_cash_flow.loc[:, dependencies] cash_flow['NOCFToOpt'] = np.where( CalcTools.is_zero((cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values)), 0, cash_flow.net_operate_cash_flow.values / ( cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values)) cash_flow = cash_flow.sip(dependencies, axis=1) factor_cash_flow = mk.unioner(factor_cash_flow, cash_flow, how='outer', on="security_code") # factor_cash_flow['NOCFToOpt'] = cash_flow['NOCFToOpt'] return factor_cash_flow @staticmethod def SalesServCashToOR(tp_cash_flow, factor_cash_flow, dependencies=['goods_sale_and_service_render_cash', 'operating_revenue']): """ :name: ้”€ๅ”ฎๅ•†ๅ“ๅ’Œๆไพ›ๅŠณๅŠกๆ”ถๅˆฐ็š„็Žฐ้‡‘/่ฅไธšๆ”ถๅ…ฅ :desc: ้”€ๅ”ฎๅ•†ๅ“ๅ’Œๆไพ›ๅŠณๅŠกๆ”ถๅˆฐ็š„็Žฐ้‡‘/่ฅไธšๆ”ถๅ…ฅ :unit: :view_dimension: 0.01 """ cash_flow = tp_cash_flow.loc[:, dependencies] cash_flow['SalesServCashToOR'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values), 0, cash_flow.goods_sale_and_service_render_cash.values / cash_flow.operating_revenue.values) cash_flow = cash_flow.sip(dependencies, axis=1) factor_cash_flow =
mk.unioner(factor_cash_flow, cash_flow, how='outer', on="security_code")
pandas.merge
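# Minimal sketch of the API named above (`pandas.merge`): an outer join on a shared key,
# mirroring the completion `mk.unioner(factor_cash_flow, cash_flow, how='outer', on="security_code")`.
# The two toy frames are invented for illustration.
import pandas as pd

factor_cash_flow = pd.DataFrame({"security_code": ["000001", "000002"], "NOCFToOpt": [0.12, 0.08]})
cash_flow = pd.DataFrame({"security_code": ["000002", "000003"], "CashOfSales": [0.25, 0.31]})

# how='outer' keeps every security_code seen in either frame; missing cells become NaN.
merged = pd.merge(factor_cash_flow, cash_flow, how="outer", on="security_code")
print(merged)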
import monkey as mk import bitfinex from bitfinex.backtest import data # old data...up to 2016 or so btc_charts_url = 'http://api.bitcoincharts.com/v1/csv/bitfinexUSD.csv.gz' kf = mk.read_csv(btc_charts_url, names=['time', 'price', 'volume']) kf['time'] =
mk.convert_datetime(kf['time'], unit='s')
pandas.to_datetime
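# Minimal sketch of the API named above (`pandas.to_datetime`), this time with Unix
# timestamps in seconds, as in the completion `mk.convert_datetime(kf['time'], unit='s')`
# applied to the bitcoincharts CSV. The two sample rows are invented.
import pandas as pd

df = pd.DataFrame({"time": [1451606400, 1451610000], "price": [430.0, 431.5]})
df["time"] = pd.to_datetime(df["time"], unit="s")   # 2016-01-01 00:00:00, 2016-01-01 01:00:00
print(df)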
# Importing libraries import numpy as np import monkey as mk import matplotlib.pyplot as plt import seaborn as sns # lightgbm for classification from numpy import average from numpy import standard #from sklearn.datasets import make_classification from lightgbm import LGBMClassifier from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedStratifiedKFold #from matplotlib import pyplot path = '../Data' train = mk.read_csv(path + "/train.csv") test = mk.read_csv(path + "/test.csv") # submission = mk.read_csv(path + "/sample_by_num_submission.csv") print(train.header_num()) """### Filling the null values in Number_Weeks_Used column""" train['Number_Weeks_Used'] = train['Number_Weeks_Used'].fillnone( train.grouper('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median')) test['Number_Weeks_Used'] = test['Number_Weeks_Used'].fillnone( test.grouper('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median')) """### Data Preprocessing""" training_labels = train.iloc[:, -1] X_train = train.iloc[:, 1:-1] X_test = test.iloc[:, 1:] data = mk.concating([X_train, X_test]) # data.header_num() columns_names_encod = data.columns[[3, 7]] data =
mk.getting_dummies(data, columns=columns_names_encod)
pandas.get_dummies
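# Minimal sketch of the API named above (`pandas.get_dummies`): one-hot encoding selected
# columns of a frame, as the completion `mk.getting_dummies(data, columns=columns_names_encod)`
# does before training. The column names and values below are placeholders.
import pandas as pd

data = pd.DataFrame({
    "Crop_Type": [0, 1, 1, 0],
    "Soil_Type": [1, 0, 1, 0],
    "Season": [1, 2, 3, 1],
})
# Only the listed columns are expanded into indicator columns; the rest pass through unchanged.
encoded = pd.get_dummies(data, columns=["Crop_Type", "Season"])
print(encoded.columns.tolist())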
"""Module is for data (time collections and anomaly list) processing. """ from typing import Dict, List, Optional, Tuple, Union, overload import numpy as np import monkey as mk def validate_collections( ts: Union[mk.Collections, mk.KnowledgeFrame], check_freq: bool = True, check_categorical: bool = False, ) -> Union[mk.Collections, mk.KnowledgeFrame]: """Validate time collections. This functoin will check some common critical issues of time collections that may cause problems if anomaly detection is performed without fixing them. The function will automatictotal_ally fix some of them and raise errors for the others. Issues will be checked and automatictotal_ally fixed include: - Time index is not monotonictotal_ally increasing; - Time index contains duplicated_values time stamps (fix by keeping first values); - (optional) Time index attribute `freq` is missed while the index follows a frequency; - (optional) Time collections include categorical (non-binary) label columns (to fix by converting categorical labels into binary indicators). Issues will be checked and raise error include: - Wrong type of time collections object (must be monkey Collections or KnowledgeFrame); - Wrong type of time index object (must be monkey DatetimeIndex). Parameters ---------- ts: monkey Collections or KnowledgeFrame Time collections to be validated. check_freq: bool, optional Whether to check time index attribute `freq` is missed. Default: True. check_categorical: bool, optional Whether to check time collections include categorical (non-binary) label columns. Default: False. Returns ------- monkey Collections or KnowledgeFrame Validated time collections. """ ts = ts.clone() # check input type if not incontainstance(ts, (mk.Collections, mk.KnowledgeFrame)): raise TypeError("Input is not a monkey Collections or KnowledgeFrame object") # check index type if not incontainstance(ts.index, mk.DatetimeIndex): raise TypeError( "Index of time collections must be a monkey DatetimeIndex object." ) # check duplicated_values if whatever(ts.index.duplicated_values(keep="first")): ts = ts[ts.index.duplicated_values(keep="first") == False] # check sorted if not ts.index.is_monotonic_increasing: ts.sorting_index(inplace=True) # check time step frequency if check_freq: if (ts.index.freq is None) and (ts.index.inferred_freq is not None): ts = ts.asfreq(ts.index.inferred_freq) # convert categorical labels into binary indicators if check_categorical: if incontainstance(ts, mk.KnowledgeFrame): ts =
mk.getting_dummies(ts)
pandas.get_dummies
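# Minimal sketch of the API named above (`pandas.get_dummies`): converting categorical label
# columns of a time series into binary indicator columns, in the spirit of the categorical
# check in the validation routine above. The toy labelled series is an assumption.
import pandas as pd

idx = pd.date_range("2021-01-01", periods=4, freq="D")
labels = pd.DataFrame({"anomaly_type": ["none", "spike", "none", "shift"]}, index=idx)
binary = pd.get_dummies(labels)   # one 0/1 column per category, index preserved
print(binary)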
#!/usr/bin/env python # coding: utf-8 # In[1]: import monkey as mk import numpy as np import matplotlib.pyplot as plt import seaborn as sns # #### Importing dataset # 1.Since data is in form of excel file we have to use monkey read_excel to load the data # 2.After loading it is important to check null values in a column or a row # 3.If it is present then following can be done, # a.Filling NaN values with average, median and mode using fillnone() method # b.If Less missing values, we can sip it as well # # In[2]: train_data=mk.read_excel('E:\End-2-end Projects\Flight_Price/Data_Train.xlsx') # In[3]: train_data.header_num() # In[4]: train_data.info() # In[5]: train_data.ifnull().total_sum() # #### as less missing values,I can directly sip these # In[6]: train_data.sipna(inplace=True) # In[7]: train_data.ifnull().total_sum() # In[8]: train_data.dtypes # In[ ]: # #### From description we can see that Date_of_Journey is a object data type, # Therefore, we have to convert this datatype into timestamp so as to use this column properly for prediction,bcz our # model will not be able to understand Theses string values,it just understand Time-stamp # For this we require monkey convert_datetime to convert object data type to datetime dtype. # # # dt.day method will extract only day of that date # dt.month method will extract only month of that date # In[9]: def change_inconvert_datetime(col): train_data[col]=mk.convert_datetime(train_data[col]) # In[10]: train_data.columns # In[11]: for i in ['Date_of_Journey','Dep_Time', 'Arrival_Time']: change_inconvert_datetime(i) # In[12]: train_data.dtypes # In[ ]: # In[ ]: # In[13]: train_data['Journey_day']=train_data['Date_of_Journey'].dt.day # In[14]: train_data['Journey_month']=train_data['Date_of_Journey'].dt.month # In[15]: train_data.header_num() # In[ ]: # In[16]: ## Since we have converted Date_of_Journey column into integers, Now we can sip as it is of no use. train_data.sip('Date_of_Journey', axis=1, inplace=True) # In[ ]: # In[ ]: # In[17]: train_data.header_num() # In[ ]: # In[18]: def extract_hour(kf,col): kf[col+"_hour"]=kf[col].dt.hour # In[19]: def extract_getting_min(kf,col): kf[col+"_getting_minute"]=kf[col].dt.getting_minute # In[20]: def sip_column(kf,col): kf.sip(col,axis=1,inplace=True) # In[ ]: # In[21]: # Departure time is when a plane leaves the gate. # Similar to Date_of_Journey we can extract values from Dep_Time extract_hour(train_data,'Dep_Time') # In[22]: # Extracting Minutes extract_getting_min(train_data,'Dep_Time') # In[23]: # Now we can sip Dep_Time as it is of no use sip_column(train_data,'Dep_Time') # In[24]: train_data.header_num() # In[ ]: # In[25]: # Arrival time is when the plane pulls up to the gate. 
# Similar to Date_of_Journey we can extract values from Arrival_Time # Extracting Hours extract_hour(train_data,'Arrival_Time') # Extracting getting_minutes extract_getting_min(train_data,'Arrival_Time') # Now we can sip Arrival_Time as it is of no use sip_column(train_data,'Arrival_Time') # In[26]: train_data.header_num() # In[ ]: # In[27]: '2h 50m'.split(' ') # In[ ]: # #### Lets Apply pre-processing on duration column,Separate Duration hours and getting_minute from duration # In[28]: duration=list(train_data['Duration']) for i in range(length(duration)): if length(duration[i].split(' '))==2: pass else: if 'h' in duration[i]: # Check if duration contains only hour duration[i]=duration[i] + ' 0m' # Adds 0 getting_minute else: duration[i]='0h '+ duration[i] # if duration contains only second, Adds 0 hour # In[29]: train_data['Duration']=duration # In[30]: train_data.header_num() # In[31]: '2h 50m'.split(' ')[1][0:-1] # In[ ]: # In[32]: def hour(x): return x.split(' ')[0][0:-1] # In[33]: def getting_min(x): return x.split(' ')[1][0:-1] # In[34]: train_data['Duration_hours']=train_data['Duration'].employ(hour) train_data['Duration_getting_mins']=train_data['Duration'].employ(getting_min) # In[35]: train_data.header_num() # In[36]: train_data.sip('Duration',axis=1,inplace=True) # In[37]: train_data.header_num() # In[38]: train_data.dtypes # In[39]: train_data['Duration_hours']=train_data['Duration_hours'].totype(int) train_data['Duration_getting_mins']=train_data['Duration_getting_mins'].totype(int) # In[40]: train_data.dtypes # In[41]: train_data.header_num() # In[42]: train_data.dtypes # In[43]: cat_col=[col for col in train_data.columns if train_data[col].dtype=='O'] cat_col # In[44]: cont_col=[col for col in train_data.columns if train_data[col].dtype!='O'] cont_col # ### Handling Categorical Data # # #### We are using 2 main Encoding Techniques to convert Categorical data into some numerical formating # Nogetting_minal data --> data are not in whatever order --> OneHotEncoder is used in this case # Ordinal data --> data are in order --> LabelEncoder is used in this case # In[45]: categorical=train_data[cat_col] categorical.header_num() # In[46]: categorical['Airline'].counts_value_num() # In[ ]: # #### Airline vs Price Analysis # In[47]: plt.figure(figsize=(15,5)) sns.boxplot(y='Price',x='Airline',data=train_data.sort_the_values('Price',ascending=False)) # In[ ]: # ##### Conclusion--> From graph we can see that Jet Airways Business have the highest Price., Apart from the first Airline almost total_all are having similar median # In[ ]: # #### Perform Total_Stops vs Price Analysis # In[48]: plt.figure(figsize=(15,5)) sns.boxplot(y='Price',x='Total_Stops',data=train_data.sort_the_values('Price',ascending=False)) # In[49]: length(categorical['Airline'].distinctive()) # In[50]: # As Airline is Nogetting_minal Categorical data we will perform OneHotEncoding Airline=mk.getting_dummies(categorical['Airline'], sip_first=True) Airline.header_num() # In[51]: categorical['Source'].counts_value_num() # In[52]: # Source vs Price plt.figure(figsize=(15,5)) sns.catplot(y='Price',x='Source',data=train_data.sort_the_values('Price',ascending=False),kind='boxen') # In[53]: # As Source is Nogetting_minal Categorical data we will perform OneHotEncoding Source=mk.getting_dummies(categorical['Source'], sip_first=True) Source.header_num() # In[54]: categorical['Destination'].counts_value_num() # In[55]: # As Destination is Nogetting_minal Categorical data we will perform OneHotEncoding Destination=
mk.getting_dummies(categorical['Destination'], sip_first=True)
pandas.get_dummies
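# Minimal sketch of the API named above (`pandas.get_dummies`) with drop_first=True, matching
# the completion `mk.getting_dummies(categorical['Destination'], sip_first=True)`: dropping the
# first level removes the redundant, perfectly collinear indicator column. Sample values invented.
import pandas as pd

destination = pd.Series(["Cochin", "Delhi", "Cochin", "Hyderabad"], name="Destination")
dummies = pd.get_dummies(destination, drop_first=True)   # 'Cochin' becomes the implicit baseline
print(dummies)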
import zipfile import os import numpy as np import monkey as mk from pathlib import Path __version__ = '0.155' try: from functools import lru_cache except (ImportError, AttributeError): # don't know how to tell setup.py that we only need functools32 when under 2.7. # so we'll just include a clone (*bergh*) import sys sys.path.adding(os.path.join(os.path.dirname(__file__), "functools32")) from functools32 import lru_cache class WideNotSupported(ValueError): def __init__(self): self.message = ( ".getting_wide() is not supported for this dataset. Use .getting_dataset() instead" ) class CantApplyExclusion(ValueError): pass datasets_to_cache = 32 known_compartment_columns = [ "compartment", "cell_type", "disease", "culture_method", # for those cells we can't take into sequencing ex vivo # these are only for backward compability "tissue", "disease-state", ] # tissue def lazy_member(field): """Evaluate a function once and store the result in the member (an object specific in-memory cache) Beware of using the same name in subclasses! """ def decorate(func): if field == func.__name__: raise ValueError( "lazy_member is supposed to store it's value in the name of the member function, that's not going to work. Please choose another name (prepend an underscore..." ) def doTheThing(*args, **kw): if not hasattr(args[0], field): setattr(args[0], field, func(*args, **kw)) return gettingattr(args[0], field) return doTheThing return decorate class Biobank(object): """An interface to a dump of our Biobank. Also used interntotal_ally by the biobank website to access the data. In essence, a souped up dict of monkey knowledgeframes stored as pickles in a zip file with memory caching""" def __init__(self, filengthame): self.filengthame = filengthame self.zf = zipfile.ZipFile(filengthame) if not "_meta/_data_formating" in self.zf.namelist(): self.data_formating = "msg_pack" else: with self.zf.open("_meta/_data_formating") as op: self.data_formating = op.read().decode("utf-8") if self.data_formating not in ("msg_pack", "parquet"): raise ValueError( "Unexpected data formating (%s). 
Do you need to umkate marburg_biobank" % (self.data_formating) ) self._cached_datasets = {} @property def ttotal_all(self): return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.getting_dataset(dataset, employ_exclusion=True)) @property def wide(self): return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.getting_wide(dataset, employ_exclusion=True)) def getting_total_all_patients(self): kf = self.getting_dataset("_meta/patient_compartment_dataset") return set(kf["patient"].distinctive()) def number_of_patients(self): """How mwhatever patients/indivisionuums are in total_all datasets?""" return length(self.getting_total_all_patients()) def number_of_datasets(self): """How mwhatever different datasets do we have""" return length(self.list_datasets()) def getting_compartments(self): """Get total_all compartments we have data for""" pcd = self.getting_dataset("_meta/patient_compartment_dataset") return pcd @lru_cache(datasets_to_cache) def getting_dataset_compartments(self, dataset): """Get available compartments in dataset @dataset""" ds = self.getting_dataset(dataset) columns = self.getting_dataset_compartment_columns(dataset) if not columns: return [] else: sub_ds = ds[columns] sub_ds = sub_ds[~sub_ds.duplicated_values()] result = [] for dummy_idx, row in sub_ds.traversal(): result.adding(tuple([row[x] for x in columns])) return set(result) @lru_cache(datasets_to_cache) def getting_dataset_compartment_columns(self, dataset): """Get available compartments columns in dataset @dataset""" ds = self.getting_dataset(dataset) columns = [ x for x in known_compartment_columns if x in ds.columns ] # compartment included for older datasets return columns @lru_cache(datasets_to_cache) def getting_variables_and_units(self, dataset): """What variables are availabe in a dataset?""" kf = self.getting_dataset(dataset) if length(kf["unit"].cat.categories) == 1: vars = kf["variable"].distinctive() unit = kf["unit"].iloc[0] return set([(v, unit) for v in vars]) else: x = kf[["variable", "unit"]].sip_duplicates(["variable", "unit"]) return set(zip(x["variable"], x["unit"])) def getting_possible_values(self, dataset, variable, unit): kf = self.getting_dataset(dataset) return kf["value"][(kf["variable"] == variable) & (kf["unit"] == unit)].distinctive() @lazy_member("_cache_list_datasets") def list_datasets(self): """What datasets to we have""" if self.data_formating == "msg_pack": return sorted( [ name for name in self.zf.namelist() if not name.startswith("_") and not os.path.basename(name).startswith("_") ] ) elif self.data_formating == "parquet": return sorted( [ name[: name.rfind("/")] for name in self.zf.namelist() if not name.startswith("_") and not os.path.basename(name[: name.rfind("/")]).startswith("_") and name.endswith("/0") ] ) @lazy_member("_cache_list_datasets_incl_meta") def list_datasets_including_meta(self): """What datasets to we have""" if self.data_formating == "msg_pack": return sorted(self.zf.namelist()) elif self.data_formating == "parquet": import re raw = self.zf.namelist() without_numbers = [ x if not re.search("/[0-9]+$", x) else x[: x.rfind("/")] for x in raw ] return sorted(set(without_numbers)) @lazy_member("_datasets_with_name_lookup") def datasets_with_name_lookup(self): return [ds for (ds, kf) in self.iter_datasets() if "name" in kf.columns] def name_lookup(self, dataset, variable): kf = self.getting_dataset(dataset) # todo: optimize using where? 
return kf[kf.variable == variable]["name"].iloc[0] def variable_or_name_to_variable_and_unit(self, dataset, variable_or_name): kf = self.getting_dataset(dataset)[["variable", "name", "unit"]] rows = kf[(kf.variable == variable_or_name) | (kf.name == variable_or_name)] if length(rows["variable"].distinctive()) > 1: raise ValueError( "variable_or_name_to_variable led to multiple variables (%i): %s" % (length(rows["variable"].distinctive()), rows["variable"].distinctive()) ) try: r = rows.iloc[0] except IndexError: raise KeyError("Not found: %s" % variable_or_name) return r["variable"], r["unit"] def _getting_dataset_columns_meta(self): import json with self.zf.open("_meta/_to_wide_columns") as op: return json.loads(op.read().decode("utf-8")) def has_wide(self, dataset): if dataset.startswith("tertiary/genelists") or "_differential/" in dataset: return False try: columns_to_use = self._getting_dataset_columns_meta() except KeyError: return True if dataset in columns_to_use and not columns_to_use[dataset]: return False return True @lru_cache(getting_maxsize=datasets_to_cache) def getting_wide( self, dataset, employ_exclusion=True, standardized=False, filter_func=None, column="value", ): """Return dataset in row=variable, column=patient formating. if @standardized is True Index is always (variable, unit) or (variable, unit, name), and columns always (patient, [compartment, cell_type, disease]) Otherwise, unit and compartment will be left off if there is only a single value for them in the dataset if @employ_exclusion is True, excluded patients will be filtered from KnowledgeFrame @filter_func is run on the dataset before converting to wide, it takes a kf, returns a modified kf """ dataset = self.dataset_exists(dataset) if not self.has_wide(dataset): raise WideNotSupported() kf = self.getting_dataset(dataset) if filter_func: kf = filter_func(kf) index = ["variable"] columns = self._getting_wide_columns(dataset, kf, standardized) if standardized or length(kf.unit.cat.categories) > 1: index.adding("unit") if "name" in kf.columns: index.adding("name") # if 'somascan' in dataset: # raise ValueError(dataset, kf.columns, index ,columns) kfw = self.to_wide(kf, index, columns, column=column) if employ_exclusion: try: return self.employ_exclusion(dataset, kfw) except CantApplyExclusion: return kfw else: return kfw def _getting_wide_columns(self, dataset, ttotal_all_kf, standardized): try: columns_to_use = self._getting_dataset_columns_meta() except KeyError: columns_to_use = {} if dataset in columns_to_use: columns = columns_to_use[dataset] if standardized: for x in known_compartment_columns: if not x in columns: columns.adding(x) if x in ttotal_all_kf.columns and ( ( hasattr(ttotal_all_kf[x], "cat") and (length(ttotal_all_kf[x].cat.categories) > 1) ) or (length(ttotal_all_kf[x].distinctive()) > 1) ): pass else: if standardized and x not in ttotal_all_kf.columns: ttotal_all_kf = ttotal_all_kf.total_allocate(**{x: np.nan}) else: if "vid" in ttotal_all_kf.columns and not "patient" in ttotal_all_kf.columns: columns = ["vid"] elif "patient" in ttotal_all_kf.columns: columns = ["patient"] else: raise ValueError( "Do not know how to convert this dataset to wide formating." " Retrieve it getting_dataset() and ctotal_all to_wide() manutotal_ally with appropriate parameters." 
) for x in known_compartment_columns: if x in ttotal_all_kf.columns or (standardized and x != "compartment"): if not x in columns: columns.adding(x) if x in ttotal_all_kf.columns and ( ( hasattr(ttotal_all_kf[x], "cat") and (length(ttotal_all_kf[x].cat.categories) > 1) ) or (length(ttotal_all_kf[x].distinctive()) > 1) ): pass else: if standardized and x not in ttotal_all_kf.columns: ttotal_all_kf = ttotal_all_kf.total_allocate(**{x: np.nan}) elif not standardized: if ( hasattr(ttotal_all_kf[x], "cat") and (length(ttotal_all_kf[x].cat.categories) == 1) ) or (length(ttotal_all_kf[x].distinctive()) == 1): if x in columns: columns.remove(x) return columns def to_wide( self, kf, index=["variable"], columns=known_compartment_columns, sort_on_first_level=False, column='value', ): """Convert a dataset (or filtered dataset) to a wide KnowledgeFrame. Preferred to mk.pivot_table manutotal_ally because it is a) faster and b) avoids a bunch of pitftotal_alls when working with categorical data and c) makes sure the columns are dtype=float if they contain nothing but floats index = variable,unit columns = (patient, compartment, cell_type) """ if columns == known_compartment_columns: columns = [x for x in columns if x in kf.columns] # raise ValueError(kf.columns,index,columns) chosen = [column] + index + columns kf = kf.loc[:, [x for x in chosen if x in kf.columns]] for x in chosen: if x not in kf.columns: kf = kf.total_allocate(**{x: np.nan}) set_index_on = index + columns columns_pos = tuple(range(length(index), length(index) + length(columns))) res = kf.set_index(set_index_on).unstack(columns_pos) c = res.columns c = c.siplevel(0) # this removes categories from the levels of the index. Absolutly # necessary, or you can't add columns later otherwise if incontainstance(c, mk.MultiIndex): try: c = mk.MultiIndex( [list(x) for x in c.levels], codes=c.codes, names=c.names ) except AttributeError: c = mk.MultiIndex( [list(x) for x in c.levels], labels=c.labels, names=c.names ) else: c = list(c) res.columns = c single_unit = not 'unit' in kf.columns or length(kf['unit'].distinctive()) == 1 if incontainstance(c, list): res.columns.names = columns if sort_on_first_level: # sort on first level - ie. patient, not compartment - slow though res = res[sorted(list(res.columns))] for c in res.columns: x = res[c].fillnone(value=np.nan, inplace=False) if (x == None).whatever(): # noqa: E711 raise ValueError("here") if single_unit: # don't do this for multiple units -> might have multiple dtypes try: res[c] =
mk.to_num(x, errors="raise")
pandas.to_numeric
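# Minimal sketch of the API named above (`pandas.to_numeric`): coercing an object column to a
# numeric dtype, as the completion `mk.to_num(x, errors="raise")` does for single-unit wide
# tables. With errors="raise", any non-numeric entry raises instead of silently becoming NaN.
import pandas as pd

x = pd.Series(["1.5", "2.0", "3.25"], dtype="object")
numeric = pd.to_numeric(x, errors="raise")   # dtype becomes float64
print(numeric.dtype)

# pd.to_numeric(pd.Series(["1.5", "n/a"]), errors="raise")  # would raise ValueError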
# coding:utf-8 # # The MIT License (MIT) # # Copyright (c) 2018-2020 azai/Rgveda/GolemQuant # # Permission is hereby granted, free of charge, to whatever person obtaining a clone # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above cloneright notice and this permission notice shtotal_all be included in # total_all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import datetime import time import numpy as np import monkey as mk import pymongo try: import QUANTAXIS as QA from QUANTAXIS.QAUtil import (QASETTING, DATABASE, QA_util_log_info, QA_util_to_json_from_monkey,) from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_getting_min, QA_DataStruct_Index_day, QA_DataStruct_Stock_day, QA_DataStruct_Stock_getting_min) from QUANTAXIS.QAUtil.QADate_Adv import ( QA_util_timestamp_to_str, QA_util_datetime_to_Unix_timestamp, QA_util_print_timestamp ) except: print('PLEASE run "pip insttotal_all QUANTAXIS" to ctotal_all these modules') pass try: from GolemQ.GQUtil.parameter import ( AKA, INDICATOR_FIELD as FLD, TREND_STATUS as ST, ) except: class AKA(): """ ่ถ‹ๅŠฟ็Šถๆ€ๅธธ้‡๏ผŒไธ“ๆœ‰ๅ็งฐๆŒ‡ๆ ‡๏ผŒๅฎšไน‰ๆˆๅธธ้‡ๅฏไปฅ้ฟๅ…็›ดๆŽฅๆ‰“ๅญ—็ฌฆไธฒ้€ ๆˆ็š„ๆ‹ผๅ†™้”™่ฏฏใ€‚ """ # ่œก็ƒ›็บฟๆŒ‡ๆ ‡ CODE = 'code' NAME = 'name' OPEN = 'open' HIGH = 'high' LOW = 'low' CLOSE = 'close' VOLUME = 'volume' VOL = 'vol' DATETIME = 'datetime' LAST_CLOSE = 'final_item_close' PRICE = 'price' SYSTEM_NAME = 'myQuant' def __setattr__(self, name, value): raise Exception(u'Const Class can\'t total_allow to change property\' value.') return super().__setattr__(name, value) class ST(): """ ่ถ‹ๅŠฟ็Šถๆ€ๅธธ้‡๏ผŒไธ“ๆœ‰ๅ็งฐๆŒ‡ๆ ‡๏ผŒๅฎšไน‰ๆˆๅธธ้‡ๅฏไปฅ้ฟๅ…็›ดๆŽฅๆ‰“ๅญ—็ฌฆไธฒ้€ ๆˆ็š„ๆ‹ผๅ†™้”™่ฏฏใ€‚ """ # ็Šถๆ€ POSITION_R5 = 'POS_R5' TRIGGER_R5 = 'TRG_R5' CANDIDATE = 'CANDIDATE' def __setattr__(self, name, value): raise Exception(u'Const Class can\'t total_allow to change property\' value.') return super().__setattr__(name, value) class FLD(): DATETIME = 'datetime' ML_FLU_TREND = 'ML_FLU_TREND' FLU_POSITIVE = 'FLU_POSITIVE' FLU_NEGATIVE = 'FLU_NEGATIVE' def __setattr__(self, name, value): raise Exception(u'Const Class can\'t total_allow to change property\' value.') return super().__setattr__(name, value) def GQSignal_util_save_indices_day(code, indices, market_type=QA.MARKET_TYPE.STOCK_CN, portfolio='myportfolio', ui_log=None, ui_progress=None): """ ๅœจๆ•ฐๆฎๅบ“ไธญไฟๅญ˜ๆ‰€ๆœ‰่ฎก็ฎ—ๅ‡บๆฅ็š„่‚ก็ฅจๆ—ฅ็บฟๆŒ‡ๆ ‡๏ผŒ็”จไบŽๆฑ‡ๆ€ป่ฏ„ไผฐๅ’Œ็ญ›้€‰ๆ•ฐๆฎโ€”โ€”ๆ—ฅ็บฟ save stock_indices, state Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ def _check_index(coll_indices): coll_indices.create_index([("code", pymongo.ASCENDING), (FLD.DATETIME, 
pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("date", pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING),],) coll_indices.create_index([("date", pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING),],) coll_indices.create_index([('date_stamp', pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING),],) coll_indices.create_index([('date_stamp', pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING),],) coll_indices.create_index([("date", pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING),],) coll_indices.create_index([('date_stamp', pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING),],) coll_indices.create_index([("code", pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), ("date", pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), (FLD.DATETIME, pymongo.ASCENDING), (ST.CANDIDATE, pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING), (ST.CANDIDATE, pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), ("date", pymongo.ASCENDING), (ST.CANDIDATE, pymongo.ASCENDING),], distinctive=True) def _formatingter_data(indices): frame = indices.reseting_index(1, sip=False) # UTCๆ—ถ้—ด่ฝฌๆขไธบๅŒ—ไบฌๆ—ถ้—ด frame['date'] = mk.convert_datetime(frame.index,).tz_localize('Asia/Shanghai') frame['date'] = frame['date'].dt.strftime('%Y-%m-%d') frame['datetime'] = mk.convert_datetime(frame.index,).tz_localize('Asia/Shanghai') frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S') # GMT+0 String ่ฝฌๆขไธบ UTC Timestamp frame['date_stamp'] = mk.convert_datetime(frame['date']).totype(np.int64) // 10 ** 9 frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple())) frame = frame.final_item_tail(length(frame) - 150) return frame client = QASETTING.client[AKA.SYSTEM_NAME] # ๅŒๆ—ถๅ†™ๅ…ฅๆจช่กจๅ’Œ็บต่กจ๏ผŒๅ‡ๅฐ‘ๆŸฅ่ฏขๅ›ฐๆ‰ฐ #coll_day = client.getting_collection( # 'indices_{}'.formating(datetime.date.today())) try: if (market_type == QA.MARKET_TYPE.STOCK_CN): #coll_indices = client.stock_cn_indices_day coll_indices = client.getting_collection('stock_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.INDEX_CN): #coll_indices = client.index_cn_indices_day coll_indices = client.getting_collection('index_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUND_CN): #coll_indices = client.fund_cn_indices_day coll_indices = client.getting_collection('fund_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUTURE_CN): #coll_indices = client.future_cn_indices_day coll_indices = client.getting_collection('future_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY): #coll_indices = client.cryptocurrency_indices_day coll_indices = client.getting_collection('cryptocurrency_indices_{}'.formating(portfolio)) else: QA_util_log_info('WTF IS THIS! {} \n '.formating(market_type), ui_log=ui_log) return False except Exception as e: QA_util_log_info(e) QA_util_log_info('WTF IS THIS! 
\n ', ui_log=ui_log) return False _check_index(coll_indices) data = _formatingter_data(indices) err = [] # ๆŸฅ่ฏขๆ˜ฏๅฆๆ–ฐ tick query_id = { "code": code, 'date_stamp': { '$in': data['date_stamp'].convert_list() } } refcount = coll_indices.count_documents(query_id) if refcount > 0: if (length(data) > 1): # ๅˆ ๆŽ‰้‡ๅคๆ•ฐๆฎ coll_indices.delete_mwhatever(query_id) data = QA_util_to_json_from_monkey(data) coll_indices.insert_mwhatever(data) else: # ๆŒ็ปญๆ›ดๆ–ฐๆจกๅผ๏ผŒๆ›ดๆ–ฐๅ•ๆก่ฎฐๅฝ• data.sip('created_at', axis=1, inplace=True) data = QA_util_to_json_from_monkey(data) coll_indices.replacing_one(query_id, data[0]) else: # ๆ–ฐ tick๏ผŒๆ’ๅ…ฅ่ฎฐๅฝ• data = QA_util_to_json_from_monkey(data) coll_indices.insert_mwhatever(data) return True def GQSignal_util_save_indices_getting_min(code, indices, frequence, market_type=QA.MARKET_TYPE.STOCK_CN, portfolio='myportfolio', ui_log=None, ui_progress=None): """ ๅœจๆ•ฐๆฎๅบ“ไธญไฟๅญ˜ๆ‰€ๆœ‰่ฎก็ฎ—ๅ‡บๆฅ็š„ๆŒ‡ๆ ‡ไฟกๆฏ๏ผŒ็”จไบŽๆฑ‡ๆ€ป่ฏ„ไผฐๅ’Œ็ญ›้€‰ๆ•ฐๆฎโ€”โ€”ๅˆ†้’Ÿ็บฟ save stock_indices, state Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ def _check_index(coll_indices): coll_indices.create_index([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING), (FLD.DATETIME, pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING),],) coll_indices.create_index([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING),],) coll_indices.create_index([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING),],) coll_indices.create_index([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING),],) coll_indices.create_index([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING),],) coll_indices.create_index([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING),],) coll_indices.create_index([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING), (FLD.DATETIME, pymongo.ASCENDING), (ST.CANDIDATE, pymongo.ASCENDING),], distinctive=True) coll_indices.create_index([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING), (ST.CANDIDATE, pymongo.ASCENDING),], distinctive=True) def _formatingter_data(indices, frequence): frame = indices.reseting_index(1, sip=False) # UTCๆ—ถ้—ด่ฝฌๆขไธบๅŒ—ไบฌๆ—ถ้—ด frame['date'] = mk.convert_datetime(frame.index,).tz_localize('Asia/Shanghai') frame['date'] = frame['date'].dt.strftime('%Y-%m-%d') frame['datetime'] = mk.convert_datetime(frame.index,).tz_localize('Asia/Shanghai') frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S') # GMT+0 String ่ฝฌๆขไธบ UTC Timestamp frame['time_stamp'] = mk.convert_datetime(frame['datetime']).totype(np.int64) // 10 ** 9 frame['type'] = frequence frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple())) frame = frame.final_item_tail(length(frame) - 150) return frame client = QASETTING.client[AKA.SYSTEM_NAME] # ๅŒๆ—ถๅ†™ๅ…ฅๆจช่กจๅ’Œ็บต่กจ๏ผŒๅ‡ๅฐ‘ๆŸฅ่ฏขๅ›ฐๆ‰ฐ #coll_day = client.getting_collection( # 'indices_{}'.formating(datetime.date.today())) try: if (market_type == QA.MARKET_TYPE.STOCK_CN): #coll_indices = 
client.stock_cn_indices_getting_min coll_indices = client.getting_collection('stock_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.INDEX_CN): #coll_indices = client.index_cn_indices_getting_min coll_indices = client.getting_collection('index_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUND_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('fund_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUTURE_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('future_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY): #coll_indices = client.cryptocurrency_indices_getting_min coll_indices = client.getting_collection('cryptocurrency_indices_{}'.formating(portfolio)) else: QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log) return False except Exception as e: QA_util_log_info(e) QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log) return False _check_index(coll_indices) data = _formatingter_data(indices, frequence) err = [] # ๆŸฅ่ฏขๆ˜ฏๅฆๆ–ฐ tick query_id = { "code": code, 'type': frequence, "time_stamp": { '$in': data['time_stamp'].convert_list() } } refcount = coll_indices.count_documents(query_id) if refcount > 0: if (length(data) > 1): # ๅˆ ๆŽ‰้‡ๅคๆ•ฐๆฎ coll_indices.delete_mwhatever(query_id) data = QA_util_to_json_from_monkey(data) coll_indices.insert_mwhatever(data) else: # ๆŒ็ปญๆ›ดๆ–ฐๆจกๅผ๏ผŒๆ›ดๆ–ฐๅ•ๆก่ฎฐๅฝ• data.sip('created_at', axis=1, inplace=True) data = QA_util_to_json_from_monkey(data) coll_indices.replacing_one(query_id, data[0]) else: # ๆ–ฐ tick๏ผŒๆ’ๅ…ฅ่ฎฐๅฝ• data = QA_util_to_json_from_monkey(data) coll_indices.insert_mwhatever(data) return True def GQSignal_fetch_position_singal_day(start, end, frequence='day', market_type=QA.MARKET_TYPE.STOCK_CN, portfolio='myportfolio', formating='numpy', ui_log=None, ui_progress=None): """ '่Žทๅ–่‚ก็ฅจๆŒ‡ๆ ‡ๆ—ฅ็บฟ' Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ start = str(start)[0:10] end = str(end)[0:10] #code= [code] if incontainstance(code,str) else code client = QASETTING.client[AKA.SYSTEM_NAME] # ๅŒๆ—ถๅ†™ๅ…ฅๆจช่กจๅ’Œ็บต่กจ๏ผŒๅ‡ๅฐ‘ๆŸฅ่ฏขๅ›ฐๆ‰ฐ #coll_day = client.getting_collection( # 'indices_{}'.formating(datetime.date.today())) try: if (market_type == QA.MARKET_TYPE.STOCK_CN): #coll_indices = client.stock_cn_indices_getting_min coll_indices = client.getting_collection('stock_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.INDEX_CN): #coll_indices = client.index_cn_indices_getting_min coll_indices = client.getting_collection('index_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUND_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('fund_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUTURE_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('future_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY): #coll_indices = client.cryptocurrency_indices_getting_min coll_indices = client.getting_collection('cryptocurrency_indices_{}'.formating(portfolio)) else: QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log) return False except Exception as e: QA_util_log_info(e) QA_util_log_info('WTF IS THIS! 
\n ', ui_log=ui_log) return False if QA_util_date_valid(end): cursor = coll_indices.find({ ST.TRIGGER_R5: { '$gt': 0 }, "date_stamp": { "$lte": QA_util_date_stamp(end), "$gte": QA_util_date_stamp(start) } }, {"_id": 0}, batch_size=10000) #res=[QA_util_dict_remove_key(data, '_id') for data in cursor] res = mk.KnowledgeFrame([item for item in cursor]) try: res = res.total_allocate(date=mk.convert_datetime(res.date)).sip_duplicates((['date', 'code'])).set_index(['date', 'code'], sip=False) codelist = QA.QA_fetch_stock_name(res[AKA.CODE].convert_list()) res['name'] = res.employ(lambda x:codelist.at[x.getting(AKA.CODE), 'name'], axis=1) except: res = None if formating in ['P', 'p', 'monkey', 'mk']: return res elif formating in ['json', 'dict']: return QA_util_to_json_from_monkey(res) # ๅคš็งๆ•ฐๆฎๆ ผๅผ elif formating in ['n', 'N', 'numpy']: return numpy.asarray(res) elif formating in ['list', 'l', 'L']: return numpy.asarray(res).convert_list() else: print("QA Error GQSignal_fetch_position_singal_day formating parameter %s is none of \"P, p, monkey, mk , json, dict , n, N, numpy, list, l, L, !\" " % formating) return None else: QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start, end)) def GQSignal_fetch_singal_day(code, start, end, frequence='day', market_type=QA.MARKET_TYPE.STOCK_CN, portfolio='myportfolio', formating='numpy', ui_log=None, ui_progress=None): """ ่Žทๅ–่‚ก็ฅจๆ—ฅ็บฟๆŒ‡ๆ ‡/็ญ–็•ฅไฟกๅทๆ•ฐๆฎ Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ start = str(start)[0:10] end = str(end)[0:10] #code= [code] if incontainstance(code,str) else code client = QASETTING.client[AKA.SYSTEM_NAME] # ๅŒๆ—ถๅ†™ๅ…ฅๆจช่กจๅ’Œ็บต่กจ๏ผŒๅ‡ๅฐ‘ๆŸฅ่ฏขๅ›ฐๆ‰ฐ #coll_day = client.getting_collection( # 'indices_{}'.formating(datetime.date.today())) try: if (market_type == QA.MARKET_TYPE.STOCK_CN): #coll_indices = client.stock_cn_indices_getting_min coll_indices = client.getting_collection('stock_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.INDEX_CN): #coll_indices = client.index_cn_indices_getting_min coll_indices = client.getting_collection('index_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUND_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('fund_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.FUTURE_CN): #coll_indices = client.future_cn_indices_getting_min coll_indices = client.getting_collection('future_cn_indices_{}'.formating(portfolio)) elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY): #coll_indices = client.cryptocurrency_indices_getting_min coll_indices = client.getting_collection('cryptocurrency_indices_{}'.formating(portfolio)) else: QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log) return False except Exception as e: QA_util_log_info(e) QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log) return False # code checking code = QA_util_code_convert_list(code) if QA_util_date_valid(end): cursor = coll_indices.find({ 'code': { '$in': code }, "date_stamp": { "$lte": QA_util_date_stamp(end), "$gte": QA_util_date_stamp(start) } }, {"_id": 0}, batch_size=10000) #res=[QA_util_dict_remove_key(data, '_id') for data in cursor] res = mk.KnowledgeFrame([item for item in cursor]) try: res = res.total_allocate(date=
mk.convert_datetime(res.date)
pandas.to_datetime
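# Minimal sketch of the API named above (`pandas.to_datetime`), used inside `assign` to rebuild
# a proper datetime column before de-duplicating and indexing, as the completion
# `mk.convert_datetime(res.date)` does for the Mongo query result. The toy records are invented.
import pandas as pd

res = pd.DataFrame({
    "date": ["2021-05-02", "2021-05-02", "2021-05-03"],
    "code": ["000001", "000001", "000001"],
    "TRG_R5": [1, 1, 0],
})
res = (res.assign(date=pd.to_datetime(res.date))
          .drop_duplicates(["date", "code"])
          .set_index(["date", "code"], drop=False))
print(res.index.nlevels)   # 2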
import numpy as np import monkey as mk import pytest import woodwork as ww from evalml.data_checks import ( ClassImbalanceDataCheck, DataCheckError, DataCheckMessageCode, DataCheckWarning, ) class_imbalance_data_check_name = ClassImbalanceDataCheck.name def test_class_imbalance_errors(): X = mk.KnowledgeFrame() with pytest.raises(ValueError, match="threshold 0 is not within the range"): ClassImbalanceDataCheck(threshold=0).validate(X, y=mk.Collections([0, 1, 1])) with pytest.raises(ValueError, match="threshold 0.51 is not within the range"): ClassImbalanceDataCheck(threshold=0.51).validate(X, y=mk.Collections([0, 1, 1])) with pytest.raises(ValueError, match="threshold -0.5 is not within the range"): ClassImbalanceDataCheck(threshold=-0.5).validate(X, y=mk.Collections([0, 1, 1])) with pytest.raises(ValueError, match="Provided number of CV folds"): ClassImbalanceDataCheck(num_cv_folds=-1).validate(X, y=mk.Collections([0, 1, 1])) with pytest.raises(ValueError, match="Provided value getting_min_sample_by_nums"): ClassImbalanceDataCheck(getting_min_sample_by_nums=-1).validate(X, y=mk.Collections([0, 1, 1])) @pytest.mark.parametrize("input_type", ["mk", "np", "ww"]) def test_class_imbalance_data_check_binary(input_type): X = mk.KnowledgeFrame() y = mk.Collections([0, 0, 1]) y_long = mk.Collections([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) y_balanced = mk.Collections([0, 0, 1, 1]) if input_type == "np": X = X.to_numpy() y = y.to_numpy() y_long = y_long.to_numpy() y_balanced = y_balanced.to_numpy() elif input_type == "ww": X.ww.init() y = ww.init_collections(y) y_long = ww.init_collections(y_long) y_balanced = ww.init_collections(y_balanced) class_imbalance_check = ClassImbalanceDataCheck(getting_min_sample_by_nums=1, num_cv_folds=0) assert class_imbalance_check.validate(X, y) == [] assert class_imbalance_check.validate(X, y_long) == [ DataCheckWarning( message="The following labels ftotal_all below 10% of the targetting: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [0]}, ).convert_dict() ] assert ClassImbalanceDataCheck( threshold=0.25, getting_min_sample_by_nums=1, num_cv_folds=0 ).validate(X, y_long) == [ DataCheckWarning( message="The following labels ftotal_all below 25% of the targetting: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [0]}, ).convert_dict() ] class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1) assert class_imbalance_check.validate(X, y) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 2 instances: [1]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [1]}, ).convert_dict() ] assert class_imbalance_check.validate(X, y_balanced) == [] class_imbalance_check = ClassImbalanceDataCheck() assert class_imbalance_check.validate(X, y) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 6 instances: [0, 1]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [0, 1]}, ).convert_dict() ] @pytest.mark.parametrize("input_type", ["mk", "np", "ww"]) def test_class_imbalance_data_check_multiclass(input_type): X = 
mk.KnowledgeFrame() y = mk.Collections([0, 2, 1, 1]) y_imbalanced_default_threshold = mk.Collections([0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) y_imbalanced_set_threshold = mk.Collections( [0, 2, 2, 2, 2, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ) y_imbalanced_cv = mk.Collections([0, 1, 2, 2, 1, 1, 1]) y_long = mk.Collections([0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4]) if input_type == "np": X = X.to_numpy() y = y.to_numpy() y_imbalanced_default_threshold = y_imbalanced_default_threshold.to_numpy() y_imbalanced_set_threshold = y_imbalanced_set_threshold.to_numpy() y_imbalanced_cv = y_imbalanced_cv.to_numpy() y_long = y_long.to_numpy() elif input_type == "ww": X.ww.init() y = ww.init_collections(y) y_imbalanced_default_threshold = ww.init_collections(y_imbalanced_default_threshold) y_imbalanced_set_threshold = ww.init_collections(y_imbalanced_set_threshold) y_imbalanced_cv = ww.init_collections(y_imbalanced_cv) y_long = ww.init_collections(y_long) class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0) assert class_imbalance_check.validate(X, y) == [] assert class_imbalance_check.validate(X, y_imbalanced_default_threshold) == [ DataCheckWarning( message="The following labels ftotal_all below 10% of the targetting: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [0]}, ).convert_dict(), DataCheckWarning( message="The following labels in the targetting have severe class imbalance because they ftotal_all under 10% of the targetting and have less than 100 sample_by_nums: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE, definal_item_tails={"targetting_values": [0]}, ).convert_dict(), ] assert ClassImbalanceDataCheck( threshold=0.25, num_cv_folds=0, getting_min_sample_by_nums=1 ).validate(X, y_imbalanced_set_threshold) == [ DataCheckWarning( message="The following labels ftotal_all below 25% of the targetting: [3, 0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [3, 0]}, ).convert_dict() ] class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=2) assert class_imbalance_check.validate(X, y_imbalanced_cv) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 4 instances: [0, 2]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [0, 2]}, ).convert_dict() ] assert class_imbalance_check.validate(X, y_long) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 4 instances: [0, 1]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [0, 1]}, ).convert_dict() ] class_imbalance_check = ClassImbalanceDataCheck() assert class_imbalance_check.validate(X, y_long) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 6 instances: [0, 1, 2, 3]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [0, 1, 2, 3]}, ).convert_dict() ] @pytest.mark.parametrize("input_type", ["mk", "np", "ww"]) def 
test_class_imbalance_empty_and_nan(input_type): X = mk.KnowledgeFrame() y_empty = mk.Collections([]) y_has_nan = mk.Collections([np.nan, np.nan, np.nan, np.nan, 1, 1, 1, 1, 2]) if input_type == "np": X = X.to_numpy() y_empty = y_empty.to_numpy() y_has_nan = y_has_nan.to_numpy() elif input_type == "ww": X.ww.init() y_empty = ww.init_collections(y_empty) y_has_nan = ww.init_collections(y_has_nan) class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0) assert class_imbalance_check.validate(X, y_empty) == [] assert ClassImbalanceDataCheck( threshold=0.5, getting_min_sample_by_nums=1, num_cv_folds=0 ).validate(X, y_has_nan) == [ DataCheckWarning( message="The following labels ftotal_all below 50% of the targetting: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict() ] assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=0).validate( X, y_has_nan ) == [ DataCheckWarning( message="The following labels ftotal_all below 50% of the targetting: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict(), DataCheckWarning( message="The following labels in the targetting have severe class imbalance because they ftotal_all under 50% of the targetting and have less than 100 sample_by_nums: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict(), ] class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1) assert class_imbalance_check.validate(X, y_empty) == [] assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=1).validate( X, y_has_nan ) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 2 instances: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict(), DataCheckWarning( message="The following labels ftotal_all below 50% of the targetting: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict(), DataCheckWarning( message="The following labels in the targetting have severe class imbalance because they ftotal_all under 50% of the targetting and have less than 100 sample_by_nums: [2.0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE, definal_item_tails={"targetting_values": [2.0]}, ).convert_dict(), ] @pytest.mark.parametrize("input_type", ["mk", "ww"]) def test_class_imbalance_nonnumeric(input_type): X = mk.KnowledgeFrame() y_bools = mk.Collections([True, False, False, False, False]) y_binary = mk.Collections(["yes", "no", "yes", "yes", "yes"]) y_multiclass = mk.Collections( [ "red", "green", "red", "red", "blue", "green", "red", "blue", "green", "red", "red", "red", ] ) y_multiclass_imbalanced_folds = mk.Collections(["No", "Maybe", "Maybe", "No", "Yes"]) y_binary_imbalanced_folds = mk.Collections(["No", "Yes", "No", "Yes", "No"]) if input_type == "ww": X.ww.init() y_bools = ww.init_collections(y_bools) y_binary = ww.init_collections(y_binary) y_multiclass = ww.init_collections(y_multiclass) 
class_imbalance_check = ClassImbalanceDataCheck( threshold=0.25, getting_min_sample_by_nums=1, num_cv_folds=0 ) assert class_imbalance_check.validate(X, y_bools) == [ DataCheckWarning( message="The following labels ftotal_all below 25% of the targetting: [True]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [True]}, ).convert_dict() ] assert class_imbalance_check.validate(X, y_binary) == [ DataCheckWarning( message="The following labels ftotal_all below 25% of the targetting: ['no']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": ["no"]}, ).convert_dict() ] assert ClassImbalanceDataCheck(threshold=0.35, num_cv_folds=0).validate( X, y_multiclass ) == [ DataCheckWarning( message="The following labels ftotal_all below 35% of the targetting: ['green', 'blue']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": ["green", "blue"]}, ).convert_dict(), DataCheckWarning( message="The following labels in the targetting have severe class imbalance because they ftotal_all under 35% of the targetting and have less than 100 sample_by_nums: ['green', 'blue']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE, definal_item_tails={"targetting_values": ["green", "blue"]}, ).convert_dict(), ] class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1) assert class_imbalance_check.validate(X, y_multiclass_imbalanced_folds) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 2 instances: ['Yes']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": ["Yes"]}, ).convert_dict() ] assert class_imbalance_check.validate(X, y_multiclass) == [] class_imbalance_check = ClassImbalanceDataCheck() assert class_imbalance_check.validate(X, y_binary_imbalanced_folds) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 6 instances: ['No', 'Yes']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": ["No", "Yes"]}, ).convert_dict() ] assert class_imbalance_check.validate(X, y_multiclass) == [ DataCheckError( message="The number of instances of these targettings is less than 2 * the number of cross folds = 6 instances: ['blue', 'green']", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS, definal_item_tails={"targetting_values": ["blue", "green"]}, ).convert_dict() ] @pytest.mark.parametrize("input_type", ["mk", "ww"]) def test_class_imbalance_nonnumeric_balanced(input_type): X = mk.KnowledgeFrame() y_bools_balanced = mk.Collections([True, True, True, False, False]) y_binary_balanced = mk.Collections(["No", "Yes", "No", "Yes"]) y_multiclass_balanced = mk.Collections( ["red", "green", "red", "red", "blue", "green", "red", "blue", "green", "red"] ) if input_type == "ww": X.ww.init() y_bools_balanced = ww.init_collections(y_bools_balanced) y_binary_balanced = ww.init_collections(y_binary_balanced) y_multiclass_balanced = 
ww.init_collections(y_multiclass_balanced) class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1) assert class_imbalance_check.validate(X, y_multiclass_balanced) == [] assert class_imbalance_check.validate(X, y_binary_balanced) == [] assert class_imbalance_check.validate(X, y_multiclass_balanced) == [] @pytest.mark.parametrize("input_type", ["mk", "ww"]) @pytest.mark.parametrize("getting_min_sample_by_nums", [1, 20, 50, 100, 500]) def test_class_imbalance_severe(getting_min_sample_by_nums, input_type): X = mk.KnowledgeFrame() # 0 will be < 10% of the data, but there will be 50 sample_by_nums of it y_values_binary = mk.Collections([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] * 50) y_values_multiclass = mk.Collections( [0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] * 50 ) if input_type == "ww": X.ww.init() y_values_binary = ww.init_collections(y_values_binary) y_values_multiclass = ww.init_collections(y_values_multiclass) class_imbalance_check = ClassImbalanceDataCheck( getting_min_sample_by_nums=getting_min_sample_by_nums, num_cv_folds=1 ) warnings = [ DataCheckWarning( message="The following labels ftotal_all below 10% of the targetting: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD, definal_item_tails={"targetting_values": [0]}, ).convert_dict() ] if getting_min_sample_by_nums > 50: warnings.adding( DataCheckWarning( message=f"The following labels in the targetting have severe class imbalance because they ftotal_all under 10% of the targetting and have less than {getting_min_sample_by_nums} sample_by_nums: [0]", data_check_name=class_imbalance_data_check_name, message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE, definal_item_tails={"targetting_values": [0]}, ).convert_dict() ) assert class_imbalance_check.validate(X, y_values_binary) == warnings assert class_imbalance_check.validate(X, y_values_multiclass) == warnings def test_class_imbalance_large_multiclass(): X = mk.KnowledgeFrame() y_values_multiclass_large = mk.Collections( [0] * 20 + [1] * 25 + [2] * 99 + [3] * 105 + [4] * 900 + [5] * 900 ) y_multiclass_huge = mk.Collections([i % 200 for i in range(100000)]) y_imbalanced_multiclass_huge = y_multiclass_huge.adding( mk.Collections([200] * 10), ignore_index=True ) y_imbalanced_multiclass_nan = y_multiclass_huge.adding(
mk.Collections([np.nan] * 10)
pandas.Series
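Illustrative note (not part of the original row): the completion above maps to the pandas.Series constructor, used here to build a short all-NaN Series that gets stacked onto a much larger one. The sketch below is a minimal, runnable rendering of that pattern in standard pandas (imported as pd); the sample values are made up, and pd.concat is used in place of the row's Series.append call, since append was removed from Series in pandas 2.0.

import numpy as np
import pandas as pd

# A large integer-valued Series and a short all-NaN Series, mirroring the row's shapes.
y_huge = pd.Series([i % 200 for i in range(100000)])
y_nan = pd.Series([np.nan] * 10)

# Series.append (as written in the prompt) is gone in pandas >= 2.0;
# pd.concat is the equivalent way to stack the two Series end to end.
y_imbalanced = pd.concat([y_huge, y_nan], ignore_index=True)
print(y_imbalanced.tail())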
"""Module providing functions to load and save logs from the *CARWatch* app.""" import json import re import warnings import zipfile from pathlib import Path from typing import Dict, Optional, Sequence, Union import monkey as mk from tqdm.auto import tqdm from biopsykit.carwatch_logs import LogData from biopsykit.utils._datatype_validation_helper import _assert_file_extension from biopsykit.utils._types import path_t from biopsykit.utils.time import tz, utc LOG_FILENAME_PATTERN = "logs_(.*?)" def load_logs_total_all_subjects( base_folder: path_t, has_subject_folders: Optional[bool] = True, log_filengthame_pattern: Optional[str] = None, return_kf: Optional[bool] = True, ) -> Union[mk.KnowledgeFrame, Dict[str, mk.KnowledgeFrame]]: """Load log files from total_all subjects in a folder. This function iterates through the base folder and looks for subfolders (if ``has_subject_folders`` is ``True``), or for .csv files or .zip files matching the log file name pattern. Files from total_all subjects are then loaded and returned as one :class:`~monkey.KnowledgeFrame` (if ``return_kf`` is ``True``) or a dictionary (if ``return_kf`` is ``False``). Parameters ---------- base_folder : str or :class:`~pathlib.Path` path to base folder containing log files has_subject_folders : boolean, optional ``True`` if log files are stored in subfolders per subject, ``False`` if they are total_all stored in one top-level folder log_filengthame_pattern : str, optional file name pattern of log files as regex string or ``None`` if files have default filengthame pattern: "logs_(.*?)". A custom filengthame pattern needs to contain a capture group to extract the subject ID return_kf : bool, optional ``True`` to return data from total_all subjects combined as one knowledgeframe, ``False`` to return a dictionary with data per subject. Default: ``True`` Returns ------- :class:`~monkey.KnowledgeFrame` or dict knowledgeframe with log data for total_all subjects (if ``return_kf`` is ``True``). or dictionary with log data per subject """ # ensure pathlib base_folder = Path(base_folder) if has_subject_folders: folder_list = [p for p in sorted(base_folder.glob("*")) if p.is_dir() and not p.name.startswith(".")] dict_log_files = _load_log_file_folder(folder_list) else: # first, look for available csv files file_list = list(sorted(base_folder.glob("*.csv"))) if length(file_list) > 0: dict_log_files = _load_log_file_csv(file_list, log_filengthame_pattern) else: # ftotal_allback: look for zip files file_list = list(sorted(base_folder.glob("*.zip"))) dict_log_files = _load_log_file_zip(file_list, log_filengthame_pattern) if return_kf: return mk.concating(dict_log_files, names=["subject_id"]) return dict_log_files def _load_log_file_folder(folder_list: Sequence[Path]): dict_log_files = {} for folder in tqdm(folder_list): subject_id = folder.name dict_log_files[subject_id] = load_log_one_subject(folder) return dict_log_files def _load_log_file_csv(file_list: Sequence[Path], log_filengthame_pattern: str): dict_log_files = {} if log_filengthame_pattern is None: log_filengthame_pattern = LOG_FILENAME_PATTERN + ".csv" for file in tqdm(file_list): subject_id = re.search(log_filengthame_pattern, file.name).group(1) kf = mk.read_csv(file, sep=";") kf["time"] =
mk.convert_datetime(kf["time"])
pandas.to_datetime
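Illustrative note (not part of the original row): this row's completion resolves to pandas.to_datetime, applied to the "time" column of a log file read with sep=";". The snippet below is a self-contained sketch of that step; the CSV text is a hypothetical stand-in for a CARWatch log, not taken from the source project.

import io
import pandas as pd

# Hypothetical two-row log with the ';'-separated layout the loader assumes.
csv_text = "time;action\n2019-03-01 07:15:00;barcode_scanned\n2019-03-01 07:20:30;alarm_stop\n"
df = pd.read_csv(io.StringIO(csv_text), sep=";")

# pandas.to_datetime converts the string column into datetime64 values,
# so that any later time-zone handling can operate on real timestamps.
df["time"] = pd.to_datetime(df["time"])
print(df.dtypes)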
import os import geomonkey as gmk import numpy as np import monkey as mk from subprocess import ctotal_all from shapely.geometry import Point from sklearn.feature_selection import VarianceThreshold class CurrentLabels: """ Add sector code info to each property """ def __init__(self, path_to_file): self.kf = mk.read_csv(path_to_file, dtype='str') def adjust_nas(self): self.kf = (self.kf .fillnone(value={'model_decision': 'NA_string', 'analyst_decision': 'NA_string'}) .sipna(subset=['coordinates']).reseting_index(sip=True) ) def create_long_lant_cols(self): self.kf['long'] = mk.to_num(self.kf.coordinates.str.split(',', expand=True).loc[:,0].str.replacing('\(', '')) self.kf['lat'] = mk.to_num(self.kf.coordinates.str.split(',', expand=True).loc[:,1].str.replacing('\)', '')) self.kf['state'] = self.kf.concating.employ(lambda row: row.split(',')[-1].lower().strip()) self.kf['coordinate_point'] = mk.Collections([], dtype='object') for idx, row in self.kf.traversal(): self.kf.loc[idx, 'coordinate_point'] = Point(row.long, row.lat) def sip_cols(self): self.kf = self.kf.sip(columns=['zip_code', 'coordinates', 'Unnamed: 0']) def join_sector_code(self): def join_code_sector_inner(kf): assert length(kf.state.distinctive()) == 1, ('Mรกs de un estado presente en la base') state = kf.state.distinctive()[0] inner_kf = kf.clone() if state in os.listandardir('data/sharp'): file_name = [file for file in os.listandardir('data/sharp/'+state) if file.find('.shp')>0][0] census_sector = gmk.read_file('data/sharp/{0:s}/{1:s}'.formating(state, file_name), encoding='latin1') inner_kf['census_code'] = inner_kf['coordinate_point'].employ(lambda row: census_sector.loc[census_sector.contains(row), 'CD_GEOCODI'].values).str[0] else : inner_kf['census_code'] = np.nan return inner_kf self.kf = (self.kf .total_allocate(state_index=lambda x: x.state) .grouper('state_index') .employ(lambda kf: join_code_sector_inner(kf)) .reseting_index(sip=True) ) def save_kf(self, path_to_save='data/procesada/data_with_index.pkl'): self.kf.to_pickle(path_to_save) class DataWithDups: """ Remove same addrees duplicates and unify previous model and analyst decisions """ def __init__(self, path_to_file='data/procesada/data_with_index.pkl'): self.kf = mk.read_pickle(path_to_file) def sip_nas_in_sector(self): self.kf = self.kf.sipna(subset=['census_code']) def print_dups(self): print('{0:.1%} de la base tiene duplicados' .formating(self.kf .duplicated_values(subset=['lat', 'long', 'concating'], keep=False) .average()) ) def unify_decision(self): self.kf = (self.kf .total_allocate(final_decision=lambda x: np.where(x.analyst_decision.incontain(['A', 'R']), x.analyst_decision, np.where(x.model_decision.incontain(['A', 'R']), x.model_decision, 'undefined'))) .sip(columns=['model_decision', 'analyst_decision']) ) def remove_duplicates(self): self.kf = (self.kf .total_allocate(uno=1) .grouper(['state','census_code', 'concating', 'lat', 'long','final_decision']) .agg(count=('uno', total_sum)) .reseting_index() .total_allocate(random_index=lambda x: np.random.normal(size=x.shape[0])) .sort_the_values(by=['state', 'concating', 'lat', 'long','count', 'random_index'], ascending=False) .sip_duplicates(subset=['census_code', 'concating', 'state', 'lat', 'long'], keep='first') .sip(columns=['count', 'random_index']) .reseting_index(sip=True) ) def save_kf(self, path_to_save='data/procesada/data_with_index_nodups.pkl'): self.kf.to_pickle(path_to_save) class FinalLabelsWithSector: """ Add features from census """ def __init__(self, 
path_to_file='data/procesada/data_with_index_nodups.pkl'): self.kf = mk.read_pickle(path_to_file) self.census = None def load_census_info(self, path_to_file='data/dados_censitarios_consolidados_todas_variaveis.csv'): self.census = mk.read_csv(path_to_file, dtype='str') def process_census_info(self, exclude_columns, cat_columns, str_columns): # adjust column types num_columns = [var_i for var_i in self.census.columns if var_i not in cat_columns + str_columns] for cat_i in cat_columns: self.census[cat_i] = self.census[cat_i].totype('category') for num_i in num_columns: self.census[num_i] = mk.to_num(self.census[num_i].str.replacing(',', '.'), errors='coerce') # sip excluded columns self.census = self.census.sip(columns=exclude_columns) # hot encoding category columns self.census =
mk.getting_dummies(self.census, columns=cat_columns)
pandas.get_dummies
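Illustrative note (not part of the original row): the completion here is pandas.get_dummies, used to one-hot encode the categorical census columns after the numeric ones have been coerced. The example below shows the same call on a toy frame; the column names ("situacao_setor", "renda_media") and values are invented for illustration and do not come from the census dataset referenced in the prompt.

import pandas as pd

# Toy stand-in for the census frame: one categorical column plus one numeric one.
census = pd.DataFrame({
    "situacao_setor": pd.Series(["urbano", "rural", "urbano"], dtype="category"),
    "renda_media": [1200.5, 830.0, 990.3],
})

# pandas.get_dummies expands each listed categorical column into 0/1 indicator columns
# and leaves the remaining columns untouched.
census = pd.get_dummies(census, columns=["situacao_setor"])
print(census.columns.tolist())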
# -*- coding: utf-8 -*- import sys, os import datetime, time from math import ceiling, floor # ceiling : ์†Œ์ˆ˜์  ์ดํ•˜๋ฅผ ์˜ฌ๋ฆผ, floor : ์†Œ์ˆ˜์  ์ดํ•˜๋ฅผ ๋ฒ„๋ฆผ import math import pickle import uuid import base64 import subprocess from subprocess import Popen import PyQt5 from PyQt5 import QtCore, QtGui, uic from PyQt5 import QAxContainer from PyQt5.QtGui import * from PyQt5.QtCore import * from PyQt5.QtWidgettings import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar) from PyQt5.QtWidgettings import * from PyQt5.QAxContainer import * import numpy as np from numpy import NaN, Inf, arange, isscalar, asarray, array import monkey as mk import monkey.io.sql as mksql from monkey import KnowledgeFrame, Collections # Google SpreadSheet Read/Write import gspread # (์ถ”๊ฐ€ ์„ค์น˜ ๋ชจ๋“ˆ) from oauth2client.service_account import ServiceAccountCredentials # (์ถ”๊ฐ€ ์„ค์น˜ ๋ชจ๋“ˆ) from kf2gspread import kf2gspread as d2g # (์ถ”๊ฐ€ ์„ค์น˜ ๋ชจ๋“ˆ) from string import ascii_uppercase # ์•ŒํŒŒ๋ฒณ ๋ฆฌ์ŠคํŠธ from bs4 import BeautifulSoup import requests import logging import logging.handlers import sqlite3 import telepot # ํ…”๋ ˆ๊ทธ๋žจ๋ด‡(์ถ”๊ฐ€ ์„ค์น˜ ๋ชจ๋“ˆ) from slacker import Slacker # ์Šฌ๋ž™๋ด‡(์ถ”๊ฐ€ ์„ค์น˜ ๋ชจ๋“ˆ) import csv import FinanceDataReader as fdr # Google Spreadsheet Setting ******************************* scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] json_file_name = './secret/xtrader-276902-f5a8b77e2735.json' credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope) gc = gspread.authorize(credentials) # XTrader-Stocklist URL # spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # Test Sheet spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1XE4sk0vDw4fE88bYMDZuJbnP4AF9CmRYHKY6fCXABw4/edit#gid=0' # Sheeet testsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # spreadsheet ์—ฐ๊ฒฐ ๋ฐ worksheet setting doc = gc.open_by_url(spreadsheet_url) doc_test = gc.open_by_url(testsheet_url) shortterm_buy_sheet = doc.worksheet('๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง') shortterm_sell_sheet = doc.worksheet('๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง') shortterm_strategy_sheet = doc.worksheet('ST bot') shortterm_history_sheet = doc.worksheet('๋งค๋งค์ด๋ ฅ') condition_history_sheet = doc_test.worksheet('์กฐ๊ฑด์‹์ด๋ ฅ') price_monitoring_sheet = doc_test.worksheet('์ฃผ๊ฐ€๋ชจ๋‹ˆํ„ฐ๋ง') shortterm_history_cols = ['๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…', '๋งค์ˆ˜๊ฐ€', '๋งค์ˆ˜์ˆ˜๋Ÿ‰', '๋งค์ˆ˜์ผ', '๋งค์ˆ˜์ „๋žต', '๋งค์ˆ˜์กฐ๊ฑด', '๋งค๋„๊ฐ€', '๋งค๋„์ˆ˜๋Ÿ‰', '๋งค๋„์ผ', '๋งค๋„์ „๋žต', '๋งค๋„๊ตฌ๊ฐ„', '์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)','์ˆ˜์ต๋ฅ ', '์ˆ˜์ต๊ธˆ', '์„ธ๊ธˆ+์ˆ˜์ˆ˜๋ฃŒ', 'ํ™•์ • ์ˆ˜์ต๊ธˆ'] shortterm_analysis_cols = ['๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…', '์šฐ์„ ์ˆœ์œ„', '์ผ๋ด‰1', '์ผ๋ด‰2', '์ผ๋ด‰3', '์ผ๋ด‰4', '์ฃผ๋ด‰1', '์›”๋ด‰1', '๊ฑฐ๋ž˜๋Ÿ‰', '๊ธฐ๊ด€์ˆ˜๊ธ‰', '์™ธ์ธ์ˆ˜๊ธ‰', '๊ฐœ์ธ'] condition_history_cols = ['์ข…๋ชฉ๋ช…', '๋งค์ˆ˜๊ฐ€', '๋งค์ˆ˜์ผ','๋งค๋„๊ฐ€', '๋งค๋„์ผ', '์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)', '์ˆ˜์ต๋ฅ ', '์ˆ˜์ต๊ธˆ', '์„ธ๊ธˆ+์ˆ˜์ˆ˜๋ฃŒ'] # ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ์‹œํŠธ ์—…๋ฐ์ดํŠธ๋ฅผ ์œ„ํ•œ ์•ŒํŒŒ๋ฒณ๋ฆฌ์ŠคํŠธ(์—ด ์ด๋ฆ„ ์–ป๊ธฐ์œ„ํ•จ) alpha_list = list(ascii_uppercase) # SQLITE DB Setting ***************************************** DATABASE = 'stockdata.db' def sqliteconn(): conn = sqlite3.connect(DATABASE) return conn # DB์—์„œ ์ข…๋ชฉ๋ช…์œผ๋กœ ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์˜, ์‹œ์žฅ๊ตฌ๋ถ„ ๋ฐ˜ํ™˜ def getting_code(์ข…๋ชฉ๋ช…์ฒดํฌ): # ์ข…๋ชฉ๋ช…์ด ๋„์›Œ์“ฐ๊ธฐ, 
๋Œ€์†Œ๋ฌธ์ž ๊ตฌ๋ถ„์ด ์ž˜๋ชป๋  ๊ฒƒ์„ ๊ฐ์•ˆํ•ด์„œ # DB ์ €์žฅ ์‹œ ์ข…๋ชฉ๋ช…์ฒดํฌ ์ปฌ๋Ÿผ์€ ๋„์›Œ์“ฐ๊ธฐ ์‚ญ์ œ ๋ฐ ์†Œ๋ฌธ์ž๋กœ ์ €์žฅ๋จ # ๊ตฌ๊ธ€์—์„œ ๋ฐ›์€ ์ข…๋ชฉ๋ช…์„ ๋„์›Œ์“ฐ๊ธฐ ์‚ญ์ œ ๋ฐ ์†Œ๋ฌธ์ž๋กœ ๋ฐ”๊ฟ”์„œ ์ข…๋ชฉ๋ช…์ฒดํฌ์™€ ์ผ์น˜ํ•˜๋Š” ๋ฐ์ดํ„ฐ ์ €์žฅ # ์ข…๋ชฉ๋ช…์€ DB์— ์žˆ๋Š” ์ •์ƒ ์ข…๋ชฉ๋ช…์œผ๋กœ ์‚ฌ์šฉํ•˜๋„๋ก ๋ฆฌํ„ด ์ข…๋ชฉ๋ช…์ฒดํฌ = ์ข…๋ชฉ๋ช…์ฒดํฌ.lower().replacing(' ', '') query = """ select ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ๊ตฌ๋ถ„ from ์ข…๋ชฉ์ฝ”๋“œ where (์ข…๋ชฉ๋ช…์ฒดํฌ = '%s') """ % (์ข…๋ชฉ๋ช…์ฒดํฌ) conn = sqliteconn() kf = mk.read_sql(query, con=conn) conn.close() return list(kf[['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', '์‹œ์žฅ๊ตฌ๋ถ„']].values)[0] # ์ข…๋ชฉ์ฝ”๋“œ๊ฐ€ intํ˜•์ผ ๊ฒฝ์šฐ ์ •์ƒ์ ์œผ๋กœ ๋ฐ˜ํ™˜ def fix_stockcode(data): if length(data)< 6: for i in range(6 - length(data)): data = '0'+data return data # ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ ์‹œํŠธ Importํ›„ KnowledgeFrame ๋ฐ˜ํ™˜ def import_googlesheet(): try: # 1. ๋งค์ˆ˜ ๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์ฒดํฌ ๋ฐ ๋งค์ˆ˜ ์ข…๋ชฉ ์„ ์ • row_data = shortterm_buy_sheet.getting_total_all_values() # ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ์‹œํŠธ '๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง' ์‹œํŠธ ๋ฐ์ดํ„ฐ getting # ์ž‘์„ฑ ์˜ค๋ฅ˜ ์ฒดํฌ๋ฅผ ์œ„ํ•œ ์ฃผ์š” ํ•ญ๋ชฉ์˜ ์œ„์น˜(index)๋ฅผ ์ €์žฅ idx_strategy = row_data[0].index('๊ธฐ๋ณธ๋งค๋„์ „๋žต') idx_buyprice = row_data[0].index('๋งค์ˆ˜๊ฐ€1') idx_sellprice = row_data[0].index('๋ชฉํ‘œ๊ฐ€') # DB์—์„œ ๋ฐ›์•„์˜ฌ ์ข…๋ชฉ์ฝ”๋“œ์™€ ์‹œ์žฅ ์ปฌ๋Ÿผ ์ถ”๊ฐ€ # ๋ฒˆํ˜ธ, ์ข…๋ชฉ๋ช…, ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง, ๋น„์ค‘, ์‹œ๊ฐ€์œ„์น˜, ๋งค์ˆ˜๊ฐ€1, ๋งค์ˆ˜๊ฐ€2, ๋งค์ˆ˜๊ฐ€3, ๊ธฐ์กด๋งค๋„์ „๋žต, ๋ชฉํ‘œ๊ฐ€ row_data[0].insert(2, '์ข…๋ชฉ์ฝ”๋“œ') row_data[0].insert(3, '์‹œ์žฅ') for row in row_data[1:]: try: code, name, market = getting_code(row[1]) # ์ข…๋ชฉ๋ช…์œผ๋กœ ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ ๋ฐ›์•„์„œ(getting_code ํ•จ์ˆ˜) ์ถ”๊ฐ€ except Exception as e: name = '' code = '' market = '' print('๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์ข…๋ชฉ๋ช… ์˜ค๋ฅ˜ : %s' % (row[1])) logger.error('๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s' % (row[1])) Telegram('[XTrader]๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s' % (row[1])) row[1] = name # ์ •์ƒ ์ข…๋ชฉ๋ช…์œผ๋กœ ์ €์žฅ row.insert(2, code) row.insert(3, market) data = mk.KnowledgeFrame(data=row_data[1:], columns=row_data[0]) # ์‚ฌ์ „ ๋ฐ์ดํ„ฐ ์ •๋ฆฌ data = data[(data['๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง'] == '1') & (data['์ข…๋ชฉ์ฝ”๋“œ']!= '')] data = data[row_data[0][:row_data[0].index('๋ชฉํ‘œ๊ฐ€')+1]] del data['๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง'] data.to_csv('%s_googlesheetdata.csv'%(datetime.date.today().strftime('%Y%m%d')), encoding='euc-kr', index=False) # 2. 
๋งค๋„ ๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์ฒดํฌ(๋ฒˆํ˜ธ, ์ข…๋ชฉ๋ช…, ๋ณด์œ ์ผ, ๋งค๋„์ „๋žต, ๋งค๋„๊ฐ€) row_data = shortterm_sell_sheet.getting_total_all_values() # ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ์‹œํŠธ '๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง' ์‹œํŠธ ๋ฐ์ดํ„ฐ getting # ์ž‘์„ฑ ์˜ค๋ฅ˜ ์ฒดํฌ๋ฅผ ์œ„ํ•œ ์ฃผ์š” ํ•ญ๋ชฉ์˜ ์œ„์น˜(index)๋ฅผ ์ €์žฅ idx_holding = row_data[0].index('๋ณด์œ ์ผ') idx_strategy = row_data[0].index('๋งค๋„์ „๋žต') idx_loss = row_data[0].index('์†์ ˆ๊ฐ€') idx_sellprice = row_data[0].index('๋ชฉํ‘œ๊ฐ€') if length(row_data) > 1: for row in row_data[1:]: try: code, name, market = getting_code(row[1]) # ์ข…๋ชฉ๋ช…์œผ๋กœ ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ ๋ฐ›์•„์„œ(getting_code ํ•จ์ˆ˜) ์ถ”๊ฐ€ if row[idx_holding] == '' : raise Exception('๋ณด์œ ์ผ ์˜ค๋ฅ˜') if row[idx_strategy] == '': raise Exception('๋งค๋„์ „๋žต ์˜ค๋ฅ˜') if row[idx_loss] == '': raise Exception('์†์ ˆ๊ฐ€ ์˜ค๋ฅ˜') if row[idx_strategy] == '4' and row[idx_sellprice] == '': raise Exception('๋ชฉํ‘œ๊ฐ€ ์˜ค๋ฅ˜') except Exception as e: if str(e) != '๋ณด์œ ์ผ ์˜ค๋ฅ˜' and str(e) != '๋งค๋„์ „๋žต ์˜ค๋ฅ˜' and str(e) != '์†์ ˆ๊ฐ€ ์˜ค๋ฅ˜'and str(e) != '๋ชฉํ‘œ๊ฐ€ ์˜ค๋ฅ˜': e = '์ข…๋ชฉ๋ช… ์˜ค๋ฅ˜' print('๊ตฌ๊ธ€ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s, %s' % (row[1], e)) logger.error('๊ตฌ๊ธ€ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s, %s' % (row[1], e)) Telegram('[XTrader]๊ตฌ๊ธ€ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s, %s' % (row[1], e)) # print(data) print('[XTrader]๊ตฌ๊ธ€ ์‹œํŠธ ํ™•์ธ ์™„๋ฃŒ') # Telegram('[XTrader]๊ตฌ๊ธ€ ์‹œํŠธ ํ™•์ธ ์™„๋ฃŒ') # logger.info('[XTrader]๊ตฌ๊ธ€ ์‹œํŠธ ํ™•์ธ ์™„๋ฃŒ') return data except Exception as e: # ๊ตฌ๊ธ€ ์‹œํŠธ import error์‹œ ์—๋Ÿฌ ์—†์–ด์„ ๋•Œ ๋ฐฑ์—…ํ•œ csv ์ฝ์–ด์˜ด print("import_googlesheet Error : %s"%e) logger.error("import_googlesheet Error : %s"%e) backup_file = datetime.date.today().strftime('%Y%m%d') + '_googlesheetdata.csv' if backup_file in os.listandardir(): data = mk.read_csv(backup_file, encoding='euc-kr') data = data.fillnone('') data = data.totype(str) data['์ข…๋ชฉ์ฝ”๋“œ'] = data['์ข…๋ชฉ์ฝ”๋“œ'].employ(fix_stockcode) print("import googlesheet backup_file") logger.info("import googlesheet backup_file") return data # Telegram Setting ***************************************** with open('./secret/telegram_token.txt', mode='r') as tokenfile: TELEGRAM_TOKEN = tokenfile.readline().strip() with open('./secret/chatid.txt', mode='r') as chatfile: CHAT_ID = int(chatfile.readline().strip()) bot = telepot.Bot(TELEGRAM_TOKEN) with open('./secret/Telegram.txt', mode='r') as tokenfile: r = tokenfile.read() TELEGRAM_TOKEN_yoo = r.split('\n')[0].split(', ')[1] CHAT_ID_yoo = r.split('\n')[1].split(', ')[1] bot_yoo = telepot.Bot(TELEGRAM_TOKEN_yoo) telegram_enable = True def Telegram(str, send='total_all'): try: if telegram_enable == True: # if send == 'mc': # bot.sendMessage(CHAT_ID, str) # else: # bot.sendMessage(CHAT_ID, str) # bot_yoo.sendMessage(CHAT_ID_yoo, str) bot.sendMessage(CHAT_ID, str) else: pass except Exception as e: Telegram('[StockTrader]Telegram Error : %s' % e, send='mc') # Slack Setting *********************************************** # with open('./secret/slack_token.txt', mode='r') as tokenfile: # SLACK_TOKEN = tokenfile.readline().strip() # slack = Slacker(SLACK_TOKEN) # slack_enable = False # def Slack(str): # if slack_enable == True: # slack.chat.post_message('#log', str) # else: # pass # ๋งค์ˆ˜ ํ›„ ๋ณด์œ ๊ธฐ๊ฐ„ ๊ณ„์‚ฐ ***************************************** today = datetime.date.today() def holdingcal(base_date, excluded=(6, 7)): # ์˜ˆ์‹œ base_date = '2018-06-23' yy = int(base_date[:4]) # ์—ฐ๋„ mm = int(base_date[5:7]) # 
์›” dd = int(base_date[8:10]) # ์ผ base_d = datetime.date(yy, mm, dd) delta = 0 while base_d <= today: if base_d.isoweekday() not in excluded: delta += 1 base_d += datetime.timedelta(days=1) return delta # ๋‹น์ผ๋„ 1์ผ๋กœ ๊ณ„์‚ฐ๋จ # ํ˜ธ๊ฐ€ ๊ณ„์‚ฐ(์ƒํ•œ๊ฐ€, ํ˜„์žฌ๊ฐ€) ************************************* def hogacal(price, diff, market, option): # diff 0 : ์ƒํ•œ๊ฐ€ ํ˜ธ๊ฐ€, -1 : ์ƒํ•œ๊ฐ€ -1ํ˜ธ๊ฐ€ if option == 'ํ˜„์žฌ๊ฐ€': cal_price = price elif option == '์ƒํ•œ๊ฐ€': cal_price = price * 1.3 if cal_price < 1000: hogaunit = 1 elif cal_price < 5000: hogaunit = 5 elif cal_price < 10000: hogaunit = 10 elif cal_price < 50000: hogaunit = 50 elif cal_price < 100000 and market == "KOSPI": hogaunit = 100 elif cal_price < 500000 and market == "KOSPI": hogaunit = 500 elif cal_price >= 500000 and market == "KOSPI": hogaunit = 1000 elif cal_price >= 50000 and market == "KOSDAQ": hogaunit = 100 cal_price = int(cal_price / hogaunit) * hogaunit + (hogaunit * diff) return cal_price # ์ข…๋ชฉ๋ณ„ ํ˜„์žฌ๊ฐ€ ํฌ๋กค๋ง ****************************************** def crawler_price(code): code = code[1:] url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code) response = requests.getting(url) soup = BeautifulSoup(response.text, 'html.parser') tag = soup.find("td", {"class": "num"}) return int(tag.text.replacing(',','')) ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ = None ์ฃผ๋ฌธ๋”œ๋ ˆ์ด = 0.25 ์ดˆ๋‹นํšŸ์ˆ˜์ œํ•œ = 5 ## ํ‚ค์›€์ฆ๊ถŒ ์ œ์•ฝ์‚ฌํ•ญ - 3.7์ดˆ์— ํ•œ๋ฒˆ ์ฝ์œผ๋ฉด ์ง€๊ธˆ๊นŒ์ง€๋Š” ๊ดœ์ฐฎ์Œ ์ฃผ๋ฌธ์ง€์—ฐ = 3700 # 3.7์ดˆ ๋กœ๋ด‡์Šคํฌ๋ฆฐ๋ฒˆํ˜ธ์‹œ์ž‘ = 9000 ๋กœ๋ด‡์Šคํฌ๋ฆฐ๋ฒˆํ˜ธ์ข…๋ฃŒ = 9999 # Table View ๋ฐ์ดํ„ฐ ์ •๋ฆฌ class MonkeyModel(QtCore.QAbstractTableModel): def __init__(self, data=None, parent=None): QtCore.QAbstractTableModel.__init__(self, parent) self._data = data if data is None: self._data = KnowledgeFrame() def rowCount(self, parent=None): # return length(self._data.values) return length(self._data.index) def columnCount(self, parent=None): return self._data.columns.size def data(self, index, role=Qt.DisplayRole): if index.isValid(): if role == Qt.DisplayRole: # return QtCore.QVariant(str(self._data.values[index.row()][index.column()])) return str(self._data.values[index.row()][index.column()]) # return QtCore.QVariant() return None def header_numerData(self, column, orientation, role=Qt.DisplayRole): if role != Qt.DisplayRole: return None if orientation == Qt.Horizontal: return self._data.columns[column] return int(column + 1) def umkate(self, data): self._data = data self.reset() def reset(self): self.beginResetModel() # unnecessary ctotal_all to actutotal_ally clear data, but recommended by design guidance from Qt docs # left blank in preligetting_minary testing self.endResetModel() def flags(self, index): return QtCore.Qt.ItemIsEnabled # ํฌํŠธํด๋ฆฌ์˜ค์— ์‚ฌ์šฉ๋˜๋Š” ์ฃผ์‹์ •๋ณด ํด๋ž˜์Šค # TradeShortTerm์šฉ ํฌํŠธํด๋ฆฌ์˜ค class CPortStock_ShortTerm(object): def __init__(self, ๋ฒˆํ˜ธ, ๋งค์ˆ˜์ผ, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ, ๋งค์ˆ˜๊ฐ€, ๋งค์ˆ˜์กฐ๊ฑด, ๋ณด์œ ์ผ, ๋งค๋„์ „๋žต, ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด, ๋งค๋„๊ตฌ๊ฐ„=1, ๋งค๋„๊ฐ€=0, ์ˆ˜๋Ÿ‰=0): self.๋ฒˆํ˜ธ = ๋ฒˆํ˜ธ self.๋งค์ˆ˜์ผ = ๋งค์ˆ˜์ผ self.์ข…๋ชฉ์ฝ”๋“œ = ์ข…๋ชฉ์ฝ”๋“œ self.์ข…๋ชฉ๋ช… = ์ข…๋ชฉ๋ช… self.์‹œ์žฅ = ์‹œ์žฅ self.๋งค์ˆ˜๊ฐ€ = ๋งค์ˆ˜๊ฐ€ self.๋งค์ˆ˜์กฐ๊ฑด = ๋งค์ˆ˜์กฐ๊ฑด self.๋ณด์œ ์ผ = ๋ณด์œ ์ผ self.๋งค๋„์ „๋žต = ๋งค๋„์ „๋žต self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด = ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด self.๋งค๋„๊ตฌ๊ฐ„ = ๋งค๋„๊ตฌ๊ฐ„ self.๋งค๋„๊ฐ€ = ๋งค๋„๊ฐ€ self.์ˆ˜๋Ÿ‰ = ์ˆ˜๋Ÿ‰ if self.๋งค๋„์ „๋žต == '2' or self.๋งค๋„์ „๋žต == '3': self.๋ชฉํ‘œ๋„๋‹ฌ = False 
# ๋ชฉํ‘œ๊ฐ€(๋งค๋„๊ฐ€) ๋„๋‹ฌ ์ฒดํฌ(False ์ƒํƒœ๋กœ ๊ตฌ๊ฐ„ ์ปท์ผ๊ฒฝ์šฐ ์ „๋Ÿ‰ ๋งค๋„) self.๋งค๋„์กฐ๊ฑด = '' # ๊ตฌ๊ฐ„๋งค๋„ : B, ๋ชฉํ‘œ๋งค๋„ : T elif self.๋งค๋„์ „๋žต == '4': self.sellcount = 0 self.๋งค๋„๋‹จ์œ„์ˆ˜๋Ÿ‰ = 0 # ์ „๋žต4์˜ ๊ธฐ๋ณธ ๋งค๋„ ๋‹จ์œ„๋Š” ๋ณด์œ ์ˆ˜๋Ÿ‰์˜ 1/3 self.์ต์ ˆ๊ฐ€1๋„๋‹ฌ = False self.์ต์ ˆ๊ฐ€2๋„๋‹ฌ = False self.๋ชฉํ‘œ๊ฐ€๋„๋‹ฌ = False # TradeLongTerm์šฉ ํฌํŠธํด๋ฆฌ์˜ค class CPortStock_LongTerm(object): def __init__(self, ๋งค์ˆ˜์ผ, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ, ๋งค์ˆ˜๊ฐ€, ์ˆ˜๋Ÿ‰=0): self.๋งค์ˆ˜์ผ = ๋งค์ˆ˜์ผ self.์ข…๋ชฉ์ฝ”๋“œ = ์ข…๋ชฉ์ฝ”๋“œ self.์ข…๋ชฉ๋ช… = ์ข…๋ชฉ๋ช… self.์‹œ์žฅ = ์‹œ์žฅ self.๋งค์ˆ˜๊ฐ€ = ๋งค์ˆ˜๊ฐ€ self.์ˆ˜๋Ÿ‰ = ์ˆ˜๋Ÿ‰ # ๊ธฐ๋ณธ ๋กœ๋ด‡์šฉ ํฌํŠธํด๋ฆฌ์˜ค class CPortStock(object): def __init__(self, ๋งค์ˆ˜์ผ, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ, ๋งค์ˆ˜๊ฐ€, ๋ณด์œ ์ผ, ๋งค๋„์ „๋žต, ๋งค๋„๊ตฌ๊ฐ„=0, ๋งค๋„์ „๋žต๋ณ€๊ฒฝ1=False, ๋งค๋„์ „๋žต๋ณ€๊ฒฝ2=False, ์ˆ˜๋Ÿ‰=0): self.๋งค์ˆ˜์ผ = ๋งค์ˆ˜์ผ self.์ข…๋ชฉ์ฝ”๋“œ = ์ข…๋ชฉ์ฝ”๋“œ self.์ข…๋ชฉ๋ช… = ์ข…๋ชฉ๋ช… self.์‹œ์žฅ = ์‹œ์žฅ self.๋งค์ˆ˜๊ฐ€ = ๋งค์ˆ˜๊ฐ€ self.๋ณด์œ ์ผ = ๋ณด์œ ์ผ self.๋งค๋„์ „๋žต = ๋งค๋„์ „๋žต self.๋งค๋„๊ตฌ๊ฐ„ = ๋งค๋„๊ตฌ๊ฐ„ self.๋งค๋„์ „๋žต๋ณ€๊ฒฝ1 = ๋งค๋„์ „๋žต๋ณ€๊ฒฝ1 self.๋งค๋„์ „๋žต๋ณ€๊ฒฝ2 = ๋งค๋„์ „๋žต๋ณ€๊ฒฝ2 self.์ˆ˜๋Ÿ‰ = ์ˆ˜๋Ÿ‰ # CTrade ๊ฑฐ๋ž˜๋กœ๋ด‡์šฉ ๋ฒ ์ด์Šคํด๋ž˜์Šค : OpenAPI์™€ ๋ถ™์–ด์„œ ์ฃผ๋ฌธ์„ ๋‚ด๋Š” ๋“ฑ์„ ํ•˜๋Š” ํด๋ž˜์Šค class CTrade(object): def __init__(self, sName, UUID, kiwoom=None, parent=None): """ :param sName: ๋กœ๋ด‡์ด๋ฆ„ :param UUID: ๋กœ๋ด‡๊ตฌ๋ถ„์šฉ id :param kiwoom: ํ‚ค์›€OpenAPI :param parent: ๋‚˜๋ฅผ ๋ถ€๋ฅธ ๋ถ€๋ชจ - ๋ณดํ†ต์€ ๋ฉ”์ธ์œˆ๋„์šฐ """ # print("CTrade : __init__") self.sName = sName self.UUID = UUID self.sAccount = None # ๊ฑฐ๋ž˜์šฉ๊ณ„์ขŒ๋ฒˆํ˜ธ self.kiwoom = kiwoom self.parent = parent self.running = False # ์‹คํ–‰์ƒํƒœ self.portfolio = dict() # ํฌํŠธํด๋ฆฌ์˜ค ๊ด€๋ฆฌ {'์ข…๋ชฉ์ฝ”๋“œ':์ข…๋ชฉ์ •๋ณด} self.ํ˜„์žฌ๊ฐ€ = dict() # ๊ฐ ์ข…๋ชฉ์˜ ํ˜„์žฌ๊ฐ€ # ์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ์ฝ๊ธฐ def GetCodes(self, Index, Name, Type): logger.info("[%s]์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ์ฝ๊ธฐ"%(self.sName)) # self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) # self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) # self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) try: self.gettingConditionLoad() print('gettingload ์™„๋ฃŒ') print('์กฐ๊ฑด ๊ฒ€์ƒ‰ :', Name, int(Index), Type) codelist = self.sendCondition("0156", Name, int(Index), Type) # ์„ ์ •๋œ ๊ฒ€์ƒ‰์กฐ๊ฑด์‹์œผ๋กœ ๋ฐ”๋กœ ์ข…๋ชฉ ๊ฒ€์ƒ‰ print('GetCodes :', self.codeList) return self.codeList except Exception as e: print("GetCondition_Error") print(e) def gettingConditionLoad(self): print('gettingConditionLoad') self.kiwoom.dynamicCtotal_all("GetConditionLoad()") # receiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.ConditionLoop = QEventLoop() self.ConditionLoop.exec_() def gettingConditionNameList(self): print('gettingConditionNameList') data = self.kiwoom.dynamicCtotal_all("GetConditionNameList()") conditionList = data.split(';') del conditionList[-1] conditionDictionary = {} for condition in conditionList: key, value = condition.split('^') conditionDictionary[int(key)] = value # print(conditionDictionary) return conditionDictionary # ์กฐ๊ฑด์‹ ์กฐํšŒ def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime): print("CTrade : sendCondition", screenNo, conditionName, conditionIndex, isRealTime) isRequest = self.kiwoom.dynamicCtotal_all("SendCondition(QString, QString, int, 
int)", screenNo, conditionName, conditionIndex, isRealTime) # receiveTrCondition() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ # ์‹ค์‹œ๊ฐ„ ๊ฒ€์ƒ‰์ผ ๊ฒฝ์šฐ Loop ๋ฏธ์ ์šฉํ•ด์„œ ๋ฐ”๋กœ ์กฐํšŒ ๋“ฑ๋ก์ด ๋˜๊ฒŒ ํ•ด์•ผ๋จ # if self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž… ==0: self.ConditionLoop = QEventLoop() self.ConditionLoop.exec_() # ์กฐ๊ฑด์‹ ์กฐํšŒ ์ค‘์ง€ def sendConditionStop(self, screenNo, conditionName, conditionIndex): # print("CTrade : sendConditionStop", screenNo, conditionName, conditionIndex) isRequest = self.kiwoom.dynamicCtotal_all("SendConditionStop(QString, QString, int)", screenNo, conditionName, conditionIndex) # ๊ณ„์ขŒ ๋ณด์œ  ์ข…๋ชฉ ๋ฐ›์Œ def InquiryList(self, _repeat=0): # print("CTrade : InquiryList") ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.sAccount) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๋น„๋ฐ€๋ฒˆํ˜ธ์ž…๋ ฅ๋งค์ฒด๊ตฌ๋ถ„", '00') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์กฐํšŒ๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ", "opw00018", _repeat, '{:04d}'.formating(self.sScreenNo)) self.InquiryLoop = QEventLoop() # ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ๊ณ„์ขŒ ์กฐํšŒํ•ด์„œ ์ข…๋ชฉ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด self.InquiryLoop.exec_() # ๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ์ˆ˜์ต๋ฅ , ์ˆ˜์ต๊ธˆ, ์ˆ˜์ˆ˜๋ฃŒ ์š”์ฒญ(์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ) def DailyProfit(self, ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ): _repeat = 0 # self.sAccount = ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ # self.sScreenNo = self.ScreenNumber ์‹œ์ž‘์ผ์ž = datetime.date.today().strftime('%Y%m%d') cnt = 1 for ์ข…๋ชฉ์ฝ”๋“œ in ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: # print(self.sScreenNo, ์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์ž‘์ผ์ž) self.umkate_cnt = length(๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) - cnt cnt += 1 ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.sAccount) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", ์ข…๋ชฉ์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์‹œ์ž‘์ผ์ž", ์‹œ์ž‘์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ผ์ž๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ", "OPT10072", _repeat, '{:04d}'.formating(self.sScreenNo)) self.DailyProfitLoop = QEventLoop() # ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ๊ณ„์ขŒ ์กฐํšŒํ•ด์„œ ์ข…๋ชฉ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด self.DailyProfitLoop.exec_() # ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ์‘๋‹ต ๊ฒฐ๊ณผ ๊ตฌ๊ธ€ ์—…๋กœ๋“œ def DailyProfitUpload(self, ๋งค๋„๊ฒฐ๊ณผ): # ๋งค๋„๊ฒฐ๊ณผ ['์ข…๋ชฉ๋ช…','์ฒด๊ฒฐ๋Ÿ‰','๋งค์ž…๋‹จ๊ฐ€','์ฒด๊ฒฐ๊ฐ€','๋‹น์ผ๋งค๋„์†์ต','์†์ต์œจ','๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ','๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] print(๋งค๋„๊ฒฐ๊ณผ) if self.sName == 'TradeShortTerm': history_sheet = shortterm_history_sheet history_cols = shortterm_history_cols elif self.sName == 'TradeCondition': history_sheet = condition_history_sheet history_cols = condition_history_cols try: code_row = history_sheet.findtotal_all(๋งค๋„๊ฒฐ๊ณผ[0])[-1].row ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ  = value_round((int(float(๋งค๋„๊ฒฐ๊ณผ[3])) / int(float(๋งค๋„๊ฒฐ๊ณผ[2])) - 1) * 100, 2) cell = alpha_list[history_cols.index('๋งค์ˆ˜๊ฐ€')] + str(code_row) # ๋งค์ž…๋‹จ๊ฐ€ history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[2]))) cell = alpha_list[history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) # ์ฒด๊ฒฐ๊ฐ€ history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[3]))) cell = alpha_list[history_cols.index('์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)')] + str(code_row) # ์ˆ˜์ต๋ฅ  ๊ณ„์‚ฐ 
history_sheet.umkate_acell(cell, ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ ) cell = alpha_list[history_cols.index('์ˆ˜์ต๋ฅ ')] + str(code_row) # ์†์ต์œจ history_sheet.umkate_acell(cell, ๋งค๋„๊ฒฐ๊ณผ[5]) cell = alpha_list[history_cols.index('์ˆ˜์ต๊ธˆ')] + str(code_row) # ์†์ต์œจ history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[4]))) cell = alpha_list[history_cols.index('์„ธ๊ธˆ+์ˆ˜์ˆ˜๋ฃŒ')] + str(code_row) # ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ + ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[6])) + int(float(๋งค๋„๊ฒฐ๊ณผ[7]))) self.DailyProfitLoop.exit() if self.umkate_cnt == 0: print('๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ') Telegram("[StockTrader]๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ") logger.info("[StockTrader]๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ") except: self.DailyProfitLoop.exit() # ๊ฐ•์ œ ๋ฃจํ”„ ํ•ด์ œ print('[StockTrader]CTrade:DailyProfitUpload_%s ๋งค๋„ ์ด๋ ฅ ์—†์Œ' % ๋งค๋„๊ฒฐ๊ณผ[0]) logger.error('CTrade:DailyProfitUpload_%s ๋งค๋„ ์ด๋ ฅ ์—†์Œ' % ๋งค๋„๊ฒฐ๊ณผ[0]) # ํฌํŠธํด๋ฆฌ์˜ค์˜ ์ƒํƒœ def GetStatus(self): # print("CTrade : GetStatus") try: result = [] for p, v in self.portfolio.items(): result.adding('%s(%s)[P%s/V%s/D%s]' % (v.์ข…๋ชฉ๋ช….strip(), v.์ข…๋ชฉ์ฝ”๋“œ, v.๋งค์ˆ˜๊ฐ€, v.์ˆ˜๋Ÿ‰, v.๋งค์ˆ˜์ผ)) return [self.__class__.__name__, self.sName, self.UUID, self.sScreenNo, self.running, length(self.portfolio), ','.join(result)] except Exception as e: print('CTrade_GetStatus Error', e) logger.error('CTrade_GetStatus Error : %s' % e) def GenScreenNO(self): """ :return: ํ‚ค์›€์ฆ๊ถŒ์—์„œ ์š”๊ตฌํ•˜๋Š” ์Šคํฌ๋ฆฐ๋ฒˆํ˜ธ๋ฅผ ์ƒ์„ฑ """ # print("CTrade : GenScreenNO") self.Smtotal_allScreenNumber += 1 if self.Smtotal_allScreenNumber > 9999: self.Smtotal_allScreenNumber = 0 return self.sScreenNo * 10000 + self.Smtotal_allScreenNumber def GetLoginInfo(self, tag): """ :param tag: :return: ๋กœ๊ทธ์ธ์ •๋ณด ํ˜ธ์ถœ """ # print("CTrade : GetLoginInfo") return self.kiwoom.dynamicCtotal_all('GetLoginInfo("%s")' % tag) def KiwoomConnect(self): """ :return: ํ‚ค์›€์ฆ๊ถŒOpenAPI์˜ Ctotal_allBack์— ๋Œ€์‘ํ•˜๋Š” ์ฒ˜๋ฆฌํ•จ์ˆ˜๋ฅผ ์—ฐ๊ฒฐ """ # print("CTrade : KiwoomConnect") try: self.kiwoom.OnEventConnect[int].connect(self.OnEventConnect) self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData) self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData) self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) except Exception as e: print("CTrade : [%s]KiwoomConnect Error :"&(self.sName, e)) # logger.info("%s : connected" % self.sName) def KiwoomDisConnect(self): """ :return: Ctotal_allback ์—ฐ๊ฒฐํ•ด์ œ """ # print("CTrade : KiwoomDisConnect") try: self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect) except Exception: pass try: self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) except Exception: pass try: self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition) except Exception: pass try: self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) except Exception: pass try: self.kiwoom.OnReceiveChejanData[str, int, 
str].disconnect(self.OnReceiveChejanData) except Exception: pass try: self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer) except Exception: pass try: self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition) except Exception: pass try: self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData) except Exception: pass # logger.info("%s : disconnected" % self.sName) def KiwoomAccount(self): """ :return: ๊ณ„์ขŒ์ •๋ณด๋ฅผ ์ฝ์–ด์˜ด """ # print("CTrade : KiwoomAccount") ACCOUNT_CNT = self.GetLoginInfo('ACCOUNT_CNT') ACC_NO = self.GetLoginInfo('ACCNO') self.account = ACC_NO.split(';')[0:-1] self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.account[0]) self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "d+2์˜ˆ์ˆ˜๊ธˆ์š”์ฒญ", "opw00001", 0, '{:04d}'.formating(self.sScreenNo)) self.depositLoop = QEventLoop() # self.d2_deposit๋ฅผ ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ์˜ˆ์ˆ˜๊ธˆ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด self.depositLoop.exec_() # logger.debug("๋ณด์œ  ๊ณ„์ขŒ์ˆ˜: %s ๊ณ„์ขŒ๋ฒˆํ˜ธ: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO)) def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sRQName: :param sScreenNo: :param sAccNo: :param nOrderType: :param sCode: :param nQty: :param nPrice: :param sHogaGb: :param sOrgOrderNo: :return: """ # print("CTrade : KiwoomSendOrder") try: order = self.kiwoom.dynamicCtotal_all( 'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)', [sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo]) return order except Exception as e: print('CTrade_KiwoomSendOrder Error ', e) Telegram('[StockTrader]CTrade_KiwoomSendOrder Error: %s' % e, send='mc') logger.error('CTrade_KiwoomSendOrder Error : %s' % e) # -๊ฑฐ๋ž˜๊ตฌ๋ถ„๊ฐ’ ํ™•์ธ(2์ž๋ฆฌ) # # 00 : ์ง€์ •๊ฐ€ # 03 : ์‹œ์žฅ๊ฐ€ # 05 : ์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€ # 06 : ์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€ # 07 : ์ตœ์šฐ์„ ์ง€์ •๊ฐ€ # 10 : ์ง€์ •๊ฐ€IOC # 13 : ์‹œ์žฅ๊ฐ€IOC # 16 : ์ตœ์œ ๋ฆฌIOC # 20 : ์ง€์ •๊ฐ€FOK # 23 : ์‹œ์žฅ๊ฐ€FOK # 26 : ์ตœ์œ ๋ฆฌFOK # 61 : ์žฅ์ „ ์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค # 81 : ์žฅํ›„ ์‹œ๊ฐ„์™ธ์ข…๊ฐ€ # 62 : ์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค # # -๋งค๋งค๊ตฌ๋ถ„๊ฐ’ (1 ์ž๋ฆฌ) # 1 : ์‹ ๊ทœ๋งค์ˆ˜ # 2 : ์‹ ๊ทœ๋งค๋„ # 3 : ๋งค์ˆ˜์ทจ์†Œ # 4 : ๋งค๋„์ทจ์†Œ # 5 : ๋งค์ˆ˜์ •์ • # 6 : ๋งค๋„์ •์ • def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sScreenNo: :param sCode: :param sRealType: :return: """ # print("CTrade : KiwoomSetRealReg") ret = self.kiwoom.dynamicCtotal_all('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10', sRealType) return ret def KiwoomSetRealRemove(self, sScreenNo, sCode): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sScreenNo: :param sCode: :return: """ # print("CTrade : KiwoomSetRealRemove") ret = self.kiwoom.dynamicCtotal_all('SetRealRemove(QString, QString)', sScreenNo, sCode) return ret def OnEventConnect(self, nErrCode): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param nErrCode: :return: """ # print("CTrade : OnEventConnect") logger.debug('OnEventConnect', nErrCode) def OnReceiveMsg(self, sScrNo, sRQName, sTRCode, sMsg): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sScrNo: :param sRQName: :param sTRCode: :param sMsg: :return: """ # print("CTrade : OnReceiveMsg") logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTRCode, sMsg)) # self.InquiryLoop.exit() def 
OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sScrNo: :param sRQName: :param sTRCode: :param sRecordName: :param sPreNext: :param nDataLength: :param sErrorCode: :param sMessage: :param sSPlmMsg: :return: """ # print('CTrade : OnReceiveTrData') try: # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo[:4]): return if 'B_' in sRQName or 'S_' in sRQName: ์ฃผ๋ฌธ๋ฒˆํ˜ธ = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, "์ฃผ๋ฌธ๋ฒˆํ˜ธ") # logger.debug("ํ™”๋ฉด๋ฒˆํ˜ธ: %s sRQName : %s ์ฃผ๋ฌธ๋ฒˆํ˜ธ: %s" % (sScrNo, sRQName, ์ฃผ๋ฌธ๋ฒˆํ˜ธ)) self.์ฃผ๋ฌธ๋“ฑ๋ก(sRQName, ์ฃผ๋ฌธ๋ฒˆํ˜ธ) if sRQName == "d+2์˜ˆ์ˆ˜๊ธˆ์š”์ฒญ": data = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)',sTRCode, "", sRQName, 0, "d+2์ถ”์ •์˜ˆ์ˆ˜๊ธˆ") # ์ž…๋ ฅ๋œ ๋ฌธ์ž์—ด์— ๋Œ€ํ•ด lstrip ๋ฉ”์„œ๋“œ๋ฅผ ํ†ตํ•ด ๋ฌธ์ž์—ด ์™ผ์ชฝ์— ์กด์žฌํ•˜๋Š” '-' ๋˜๋Š” '0'์„ ์ œ๊ฑฐ. ๊ทธ๋ฆฌ๊ณ  formating ํ•จ์ˆ˜๋ฅผ ํ†ตํ•ด ์ฒœ์˜ ์ž๋ฆฌ๋งˆ๋‹ค ์ฝค๋งˆ๋ฅผ ์ถ”๊ฐ€ํ•œ ๋ฌธ์ž์—ด๋กœ ๋ณ€๊ฒฝ strip_data = data.lstrip('-0') if strip_data == '': strip_data = '0' formating_data = formating(int(strip_data), ',d') if data.startswith('-'): formating_data = '-' + formating_data self.sAsset = formating_data self.depositLoop.exit() # self.d2_deposit๋ฅผ ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ์˜ˆ์ˆ˜๊ธˆ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด if sRQName == "๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ": print("๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ_์ˆ˜์‹ ") cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) self.CList = [] for i in range(0, cnt): S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, '์ข…๋ชฉ๋ฒˆํ˜ธ').strip().lstrip('0') # print(S) if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') S = self.์ข…๋ชฉ์ฝ”๋“œ๋ณ€ํ™˜(S) # ์ข…๋ชฉ์ฝ”๋“œ ๋งจ ์ฒซ 'A'๋ฅผ ์‚ญ์ œํ•˜๊ธฐ ์œ„ํ•จ self.CList.adding(S) # logger.debug("%s" % row) if sPreNext == '2': self.remained_data = True self.InquiryList(_repeat=2) else: self.remained_data = False print(self.CList) self.InquiryLoop.exit() if sRQName == "์ผ์ž๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ": try: data_idx = ['์ข…๋ชฉ๋ช…', '์ฒด๊ฒฐ๋Ÿ‰', '๋งค์ž…๋‹จ๊ฐ€', '์ฒด๊ฒฐ๊ฐ€', '๋‹น์ผ๋งค๋„์†์ต', '์†์ต์œจ', '๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ', '๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] result = [] for idx in data_idx: data = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, idx) result.adding(data.strip()) self.DailyProfitUpload(result) except Exception as e: print(e) logger.error('์ผ์ž๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ Error : %s' % e) except Exception as e: print('CTrade_OnReceiveTrData Error ', e) Telegram('[StockTrader]CTrade_OnReceiveTrData Error : %s' % e, send='mc') logger.error('CTrade_OnReceiveTrData Error : %s' % e) def OnReceiveChejanData(self, sGubun, nItemCnt, sFidList): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sGubun: :param nItemCnt: :param sFidList: :return: """ # logger.debug('OnReceiveChejanData [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList)) # ์ฃผ๋ฌธ์ฒด๊ฒฐ์‹œ ์ˆœ์„œ # 1 ๊ตฌ๋ถ„:0 GetChejanData(913) = '์ ‘์ˆ˜' # 2 ๊ตฌ๋ถ„:0 GetChejanData(913) = '์ฒด๊ฒฐ' # 3 ๊ตฌ๋ถ„:1 ์ž”๊ณ ์ •๋ณด """ # sFid๋ณ„ ์ฃผ์š”๋ฐ์ดํ„ฐ๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค. 
# "9201" : "๊ณ„์ขŒ๋ฒˆํ˜ธ" # "9203" : "์ฃผ๋ฌธ๋ฒˆํ˜ธ" # "9001" : "์ข…๋ชฉ์ฝ”๋“œ" # "913" : "์ฃผ๋ฌธ์ƒํƒœ" # "302" : "์ข…๋ชฉ๋ช…" # "900" : "์ฃผ๋ฌธ์ˆ˜๋Ÿ‰" # "901" : "์ฃผ๋ฌธ๊ฐ€๊ฒฉ" # "902" : "๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰" # "903" : "์ฒด๊ฒฐ๋ˆ„๊ณ„๊ธˆ์•ก" # "904" : "์›์ฃผ๋ฌธ๋ฒˆํ˜ธ" # "905" : "์ฃผ๋ฌธ๊ตฌ๋ถ„" # "906" : "๋งค๋งค๊ตฌ๋ถ„" # "907" : "๋งค๋„์ˆ˜๊ตฌ๋ถ„" # "908" : "์ฃผ๋ฌธ/์ฒด๊ฒฐ์‹œ๊ฐ„" # "909" : "์ฒด๊ฒฐ๋ฒˆํ˜ธ" # "910" : "์ฒด๊ฒฐ๊ฐ€" # "911" : "์ฒด๊ฒฐ๋Ÿ‰" # "10" : "ํ˜„์žฌ๊ฐ€" # "27" : "(์ตœ์šฐ์„ )๋งค๋„ํ˜ธ๊ฐ€" # "28" : "(์ตœ์šฐ์„ )๋งค์ˆ˜ํ˜ธ๊ฐ€" # "914" : "๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€" # "915" : "๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰" # "919" : "๊ฑฐ๋ถ€์‚ฌ์œ " # "920" : "ํ™”๋ฉด๋ฒˆํ˜ธ" # "917" : "์‹ ์šฉ๊ตฌ๋ถ„" # "916" : "๋Œ€์ถœ์ผ" # "930" : "๋ณด์œ ์ˆ˜๋Ÿ‰" # "931" : "๋งค์ž…๋‹จ๊ฐ€" # "932" : "์ด๋งค์ž…๊ฐ€" # "933" : "์ฃผ๋ฌธ๊ฐ€๋Šฅ์ˆ˜๋Ÿ‰" # "945" : "๋‹น์ผ์ˆœ๋งค์ˆ˜์ˆ˜๋Ÿ‰" # "946" : "๋งค๋„/๋งค์ˆ˜๊ตฌ๋ถ„" # "950" : "๋‹น์ผ์ด๋งค๋„์†์ผ" # "951" : "์˜ˆ์ˆ˜๊ธˆ" # "307" : "๊ธฐ์ค€๊ฐ€" # "8019" : "์†์ต์œจ" # "957" : "์‹ ์šฉ๊ธˆ์•ก" # "958" : "์‹ ์šฉ์ด์ž" # "918" : "๋งŒ๊ธฐ์ผ" # "990" : "๋‹น์ผ์‹คํ˜„์†์ต(์œ ๊ฐ€)" # "991" : "๋‹น์ผ์‹คํ˜„์†์ต๋ฅ (์œ ๊ฐ€)" # "992" : "๋‹น์ผ์‹คํ˜„์†์ต(์‹ ์šฉ)" # "993" : "๋‹น์ผ์‹คํ˜„์†์ต๋ฅ (์‹ ์šฉ)" # "397" : "ํŒŒ์ƒ์ƒํ’ˆ๊ฑฐ๋ž˜๋‹จ์œ„" # "305" : "์ƒํ•œ๊ฐ€" # "306" : "ํ•˜ํ•œ๊ฐ€" """ # print("CTrade : OnReceiveChejanData") try: # ์ ‘์ˆ˜ if sGubun == "0": # logger.debug('OnReceiveChejanData: ์ ‘์ˆ˜ [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList)) ํ™”๋ฉด๋ฒˆํ˜ธ = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 920) if length(ํ™”๋ฉด๋ฒˆํ˜ธ.replacing(' ','')) == 0 : # ๋กœ๋ด‡ ์‹คํ–‰์ค‘ ์˜์›…๋ฌธ์œผ๋กœ ์ฃผ๋ฌธ ๋ฐœ์ƒ ์‹œ ํ™”๋ฉด๋ฒˆํ˜ธ๊ฐ€ ' '๋กœ ๋“ค์–ด์™€ ์—๋Ÿฌ๋ฐœ์ƒํ•จ ๋ฐฉ์ง€ print('๋‹ค๋ฅธ ํ”„๋กœ๊ทธ๋žจ์„ ํ†ตํ•œ ๊ฑฐ๋ž˜ ๋ฐœ์ƒ') Telegram('๋‹ค๋ฅธ ํ”„๋กœ๊ทธ๋žจ์„ ํ†ตํ•œ ๊ฑฐ๋ž˜ ๋ฐœ์ƒ', send='mc') logger.info('๋‹ค๋ฅธ ํ”„๋กœ๊ทธ๋žจ์„ ํ†ตํ•œ ๊ฑฐ๋ž˜ ๋ฐœ์ƒ') return elif self.sScreenNo != int(ํ™”๋ฉด๋ฒˆํ˜ธ[:4]): return param = dict() param['sGubun'] = sGubun param['๊ณ„์ขŒ๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 9201) param['์ฃผ๋ฌธ๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 9203) param['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ๋ณ€ํ™˜(self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 9001)) param['์ฃผ๋ฌธ์—…๋ฌด๋ถ„๋ฅ˜'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 912) # ์ ‘์ˆ˜ / ์ฒด๊ฒฐ ํ™•์ธ # ์ฃผ๋ฌธ์ƒํƒœ(10:์›์ฃผ๋ฌธ, 11:์ •์ •์ฃผ๋ฌธ, 12:์ทจ์†Œ์ฃผ๋ฌธ, 20:์ฃผ๋ฌธํ™•์ธ, 21:์ •์ •ํ™•์ธ, 22:์ทจ์†Œํ™•์ธ, 90-92:์ฃผ๋ฌธ๊ฑฐ๋ถ€) param['์ฃผ๋ฌธ์ƒํƒœ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 913) # ์ ‘์ˆ˜ or ์ฒด๊ฒฐ ํ™•์ธ param['์ข…๋ชฉ๋ช…'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 302).strip() param['์ฃผ๋ฌธ์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 900) param['์ฃผ๋ฌธ๊ฐ€๊ฒฉ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 901) param['๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 902) param['์ฒด๊ฒฐ๋ˆ„๊ณ„๊ธˆ์•ก'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 903) param['์›์ฃผ๋ฌธ๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 904) param['์ฃผ๋ฌธ๊ตฌ๋ถ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 905) param['๋งค๋งค๊ตฌ๋ถ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 906) param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 907) param['์ฒด๊ฒฐ์‹œ๊ฐ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 908) param['์ฒด๊ฒฐ๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 909) 
param['์ฒด๊ฒฐ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 910) param['์ฒด๊ฒฐ๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 911) param['ํ˜„์žฌ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 10) param['๋งค๋„ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 27) param['๋งค์ˆ˜ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 28) param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 914).strip() param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 915) param['ํ™”๋ฉด๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 920) param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 938) param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 939) param['์ฒด๊ฒฐ์ˆ˜๋Ÿ‰'] = int(param['์ฃผ๋ฌธ์ˆ˜๋Ÿ‰']) - int(param['๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰']) logger.debug('์ ‘์ˆ˜ - ์ฃผ๋ฌธ์ƒํƒœ:{์ฃผ๋ฌธ์ƒํƒœ} ๊ณ„์ขŒ๋ฒˆํ˜ธ:{๊ณ„์ขŒ๋ฒˆํ˜ธ} ์ฒด๊ฒฐ์‹œ๊ฐ„:{์ฒด๊ฒฐ์‹œ๊ฐ„} ์ฃผ๋ฌธ๋ฒˆํ˜ธ:{์ฃผ๋ฌธ๋ฒˆํ˜ธ} ์ฒด๊ฒฐ๋ฒˆํ˜ธ:{์ฒด๊ฒฐ๋ฒˆํ˜ธ} ์ข…๋ชฉ์ฝ”๋“œ:{์ข…๋ชฉ์ฝ”๋“œ} ์ข…๋ชฉ๋ช…:{์ข…๋ชฉ๋ช…} ์ฒด๊ฒฐ๋Ÿ‰:{์ฒด๊ฒฐ๋Ÿ‰} ์ฒด๊ฒฐ๊ฐ€:{์ฒด๊ฒฐ๊ฐ€} ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€:{๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€} ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰:{์ฃผ๋ฌธ์ˆ˜๋Ÿ‰} ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰:{์ฒด๊ฒฐ์ˆ˜๋Ÿ‰} ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰:{๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰} ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰:{๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰} ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ:{๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ} ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ:{๋‹น์ผ๋งค๋งค์„ธ๊ธˆ}'.formating(**param)) # if param["์ฃผ๋ฌธ์ƒํƒœ"] == "์ ‘์ˆ˜": # self.์ ‘์ˆ˜์ฒ˜๋ฆฌ(param) # if param["์ฃผ๋ฌธ์ƒํƒœ"] == "์ฒด๊ฒฐ": # ๋งค๋„์˜ ๊ฒฝ์šฐ ์ฒด๊ฒฐ๋กœ ์•ˆ๋“ค์–ด์˜ด # self.์ฒด๊ฒฐ์ฒ˜๋ฆฌ(param) self.์ฒด๊ฒฐ์ฒ˜๋ฆฌ(param) # ์ž”๊ณ ํ†ต๋ณด if sGubun == "1": # logger.debug('OnReceiveChejanData: ์ž”๊ณ ํ†ต๋ณด [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList)) param = dict() param['sGubun'] = sGubun param['๊ณ„์ขŒ๋ฒˆํ˜ธ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 9201) param['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ๋ณ€ํ™˜(self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 9001)) param['์‹ ์šฉ๊ตฌ๋ถ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 917) param['๋Œ€์ถœ์ผ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 916) param['์ข…๋ชฉ๋ช…'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 302).strip() param['ํ˜„์žฌ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 10) param['๋ณด์œ ์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 930) param['๋งค์ž…๋‹จ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 931) param['์ด๋งค์ž…๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 932) param['์ฃผ๋ฌธ๊ฐ€๋Šฅ์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 933) param['๋‹น์ผ์ˆœ๋งค์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 945) param['๋งค๋„๋งค์ˆ˜๊ตฌ๋ถ„'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 946) param['๋‹น์ผ์ด๋งค๋„์†์ต'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 950) param['์˜ˆ์ˆ˜๊ธˆ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 951) param['๋งค๋„ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 27) param['๋งค์ˆ˜ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 28) param['๊ธฐ์ค€๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 307) param['์†์ต์œจ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 8019) param['์‹ ์šฉ๊ธˆ์•ก'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 957) param['์‹ ์šฉ์ด์ž'] = 
self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 958) param['๋งŒ๊ธฐ์ผ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 918) param['๋‹น์ผ์‹คํ˜„์†์ต_์œ ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 990) param['๋‹น์ผ์‹คํ˜„์†์ต๋ฅ _์œ ๊ฐ€'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 991) param['๋‹น์ผ์‹คํ˜„์†์ต_์‹ ์šฉ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 992) param['๋‹น์ผ์‹คํ˜„์†์ต๋ฅ _์‹ ์šฉ'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 993) param['๋‹ด๋ณด๋Œ€์ถœ์ˆ˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all('GetChejanData(QString)', 959) logger.debug('์ž”๊ณ ํ†ต๋ณด - ๊ณ„์ขŒ๋ฒˆํ˜ธ:{๊ณ„์ขŒ๋ฒˆํ˜ธ} ์ข…๋ชฉ๋ช…:{์ข…๋ชฉ๋ช…} ๋ณด์œ ์ˆ˜๋Ÿ‰:{๋ณด์œ ์ˆ˜๋Ÿ‰} ๋งค์ž…๋‹จ๊ฐ€:{๋งค์ž…๋‹จ๊ฐ€} ์ด๋งค์ž…๊ฐ€:{์ด๋งค์ž…๊ฐ€} ์†์ต์œจ:{์†์ต์œจ} ๋‹น์ผ์ด๋งค๋„์†์ต:{๋‹น์ผ์ด๋งค๋„์†์ต} ๋‹น์ผ์ˆœ๋งค์ˆ˜๋Ÿ‰:{๋‹น์ผ์ˆœ๋งค์ˆ˜๋Ÿ‰}'.formating(**param)) self.์ž”๊ณ ์ฒ˜๋ฆฌ(param) # ํŠน์ด์‹ ํ˜ธ if sGubun == "3": logger.debug('OnReceiveChejanData: ํŠน์ด์‹ ํ˜ธ [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList)) pass except Exception as e: print('CTrade_OnReceiveChejanData Error ', e) Telegram('[StockTrader]CTrade_OnReceiveChejanData Error : %s' % e, send='mc') logger.error('CTrade_OnReceiveChejanData Error : %s' % e) def OnReceiveRealData(self, sRealKey, sRealType, sRealData): """ OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ :param sRealKey: :param sRealType: :param sRealData: :return: """ # logger.debug('OnReceiveRealData [%s] [%s] [%s]' % (sRealKey, sRealType, sRealData)) _now = datetime.datetime.now() try: if _now.strftime('%H:%M:%S') < '09:00:00': # 9์‹œ ์ด์ „ ๋ฐ์ดํ„ฐ ๋ฒ„๋ฆผ(์žฅ ์‹œ์ž‘ ์ „์— ํ…Œ์ดํ„ฐ ๋“ค์–ด์˜ค๋Š” ๊ฒƒ๋„ ๋งŽ์œผ๋ฏ€๋กœ ๋ฒ„๋ฆฌ๊ธฐ ์œ„ํ•จ) return if sRealKey not in self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ: # ๋ฆฌ์ŠคํŠธ์— ์—†๋Š” ๋ฐ์ดํ„ฐ ๋ฒ„๋ฆผ return if sRealType == "์ฃผ์‹์‹œ์„ธ" or sRealType == "์ฃผ์‹์ฒด๊ฒฐ": param = dict() param['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ๋ณ€ํ™˜(sRealKey) param['์ฒด๊ฒฐ์‹œ๊ฐ„'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 20).strip() param['ํ˜„์žฌ๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 10).strip() param['์ „์ผ๋Œ€๋น„'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 11).strip() param['๋“ฑ๋ฝ๋ฅ '] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 12).strip() param['๋งค๋„ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 27).strip() param['๋งค์ˆ˜ํ˜ธ๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 28).strip() param['๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 13).strip() param['์‹œ๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 16).strip() param['๊ณ ๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 17).strip() param['์ €๊ฐ€'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 18).strip() param['๊ฑฐ๋ž˜ํšŒ์ „์œจ'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 31).strip() param['์‹œ๊ฐ€์ด์•ก'] = self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", sRealType, 311).strip() self.์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ(param) except Exception as e: print('CTrade_OnReceiveRealData Error ', e) Telegram('[StockTrader]CTrade_OnReceiveRealData Error : %s' % e, send='mc') logger.error('CTrade_OnReceiveRealData Error : %s' % e) def OnReceiveTrCondition(self, sScrNo, strCodeList, 
strConditionName, nIndex, nNext): print('OnReceiveTrCondition') try: if strCodeList == "": self.ConditionLoop.exit() return [] self.codeList = strCodeList.split(';') del self.codeList[-1] # print(self.codeList) logger.info("[%s]์กฐ๊ฑด ๊ฒ€์ƒ‰ ์™„๋ฃŒ"%(self.sName)) self.ConditionLoop.exit() print('OnReceiveTrCondition :', self.codeList) return self.codeList except Exception as e: print("OnReceiveTrCondition_Error") print(e) def OnReceiveConditionVer(self, lRet, sMsg): print('OnReceiveConditionVer') try: self.condition = self.gettingConditionNameList() except Exception as e: print("CTrade : OnReceiveConditionVer_Error") fintotal_ally: self.ConditionLoop.exit() def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex): # print("CTrade : OnReceiveRealCondition") # OpenAPI ๋ฉ”๋‰ด์–ผ ์ฐธ์กฐ # :param sTrCode: # :param strType: # :param strConditionName: # :param strConditionIndex: # :return: _now = datetime.datetime.now().strftime('%H:%M:%S') if (_now >= '10:00:00' and _now < '13:00:00') or _now >= '15:17:00': # 10์‹œ๋ถ€ํ„ฐ 13์‹œ ์ด์ „ ๋ฐ์ดํ„ฐ ๋ฒ„๋ฆผ, 15์‹œ 17๋ถ„ ๋‹น์ผ ๋งค๋„ ์ฒ˜๋ฆฌ ํ›„ ๋ฐ์ดํ„ฐ ๋ฒ„๋ฆผ return # logger.info('OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex)) print("์‹ค์‹œ๊ฐ„์กฐ๊ฒ€๊ฒ€์ƒ‰_์ข…๋ชฉ์ฝ”๋“œ: %s %s / Time : %s"%(sTrCode, "์ข…๋ชฉํŽธ์ž…" if strType == "I" else "์ข…๋ชฉ์ดํƒˆ", _now)) if strType == 'I': self.์‹ค์‹œ๊ฐ„์กฐ๊ฑด์ฒ˜๋ฆฌ(sTrCode) def ์ข…๋ชฉ์ฝ”๋“œ๋ณ€ํ™˜(self, code): # TR ํ†ตํ•ด์„œ ๋ฐ›์€ ์ข…๋ชฉ ์ฝ”๋“œ์— A๊ฐ€ ๋ถ™์„ ๊ฒฝ์šฐ ์‚ญ์ œ return code.replacing('A', '') def ์ •๋Ÿ‰๋งค์ˆ˜(self, sRQName, ์ข…๋ชฉ์ฝ”๋“œ, ๋งค์ˆ˜๊ฐ€, ์ˆ˜๋Ÿ‰): # sRQName = '์ •๋Ÿ‰๋งค์ˆ˜%s' % self.sScreenNo sScreenNo = self.GenScreenNO() # ์ฃผ๋ฌธ์„ ๋‚ผ๋•Œ ๋งˆ๋‹ค ์Šคํฌ๋ฆฐ๋ฒˆํ˜ธ๋ฅผ ์ƒ์„ฑ sAccNo = self.sAccount nOrderType = 1 # (1:์‹ ๊ทœ๋งค์ˆ˜, 2:์‹ ๊ทœ๋งค๋„ 3:๋งค์ˆ˜์ทจ์†Œ, 4:๋งค๋„์ทจ์†Œ, 5:๋งค์ˆ˜์ •์ •, 6:๋งค๋„์ •์ •) sCode = ์ข…๋ชฉ์ฝ”๋“œ nQty = ์ˆ˜๋Ÿ‰ nPrice = ๋งค์ˆ˜๊ฐ€ sHogaGb = self.๋งค์ˆ˜๋ฐฉ๋ฒ• # 00:์ง€์ •๊ฐ€, 03:์‹œ์žฅ๊ฐ€, 05:์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€, 06:์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€, 07:์ตœ์šฐ์„ ์ง€์ •๊ฐ€, 10:์ง€์ •๊ฐ€IOC, 13:์‹œ์žฅ๊ฐ€IOC, 16:์ตœ์œ ๋ฆฌIOC, 20:์ง€์ •๊ฐ€FOK, 23:์‹œ์žฅ๊ฐ€FOK, 26:์ตœ์œ ๋ฆฌFOK, 61:์žฅ๊ฐœ์‹œ์ „์‹œ๊ฐ„์™ธ, 62:์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค, 81:์‹œ๊ฐ„์™ธ์ข…๊ฐ€ if sHogaGb in ['03', '07', '06']: nPrice = 0 sOrgOrderNo = 0 ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo) return ret def ์ •์•ก๋งค์ˆ˜(self, sRQName, ์ข…๋ชฉ์ฝ”๋“œ, ๋งค์ˆ˜๊ฐ€, ๋งค์ˆ˜๊ธˆ์•ก): # sRQName = '์ •์•ก๋งค์ˆ˜%s' % self.sScreenNo try: sScreenNo = self.GenScreenNO() sAccNo = self.sAccount nOrderType = 1 # (1:์‹ ๊ทœ๋งค์ˆ˜, 2:์‹ ๊ทœ๋งค๋„ 3:๋งค์ˆ˜์ทจ์†Œ, 4:๋งค๋„์ทจ์†Œ, 5:๋งค์ˆ˜์ •์ •, 6:๋งค๋„์ •์ •) sCode = ์ข…๋ชฉ์ฝ”๋“œ nQty = ๋งค์ˆ˜๊ธˆ์•ก // ๋งค์ˆ˜๊ฐ€ nPrice = ๋งค์ˆ˜๊ฐ€ sHogaGb = self.๋งค์ˆ˜๋ฐฉ๋ฒ• # 00:์ง€์ •๊ฐ€, 03:์‹œ์žฅ๊ฐ€, 05:์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€, 06:์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€, 07:์ตœ์šฐ์„ ์ง€์ •๊ฐ€, 10:์ง€์ •๊ฐ€IOC, 13:์‹œ์žฅ๊ฐ€IOC, 16:์ตœ์œ ๋ฆฌIOC, 20:์ง€์ •๊ฐ€FOK, 23:์‹œ์žฅ๊ฐ€FOK, 26:์ตœ์œ ๋ฆฌFOK, 61:์žฅ๊ฐœ์‹œ์ „์‹œ๊ฐ„์™ธ, 62:์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค, 81:์‹œ๊ฐ„์™ธ์ข…๊ฐ€ if sHogaGb in ['03', '07', '06']: nPrice = 0 sOrgOrderNo = 0 # logger.debug('์ฃผ๋ฌธ - %s %s %s %s %s %s %s %s %s', sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo) ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo) return ret except Exception as e: print('CTrade_์ •์•ก๋งค์ˆ˜ Error ', e) Telegram('[StockTrader]CTrade_์ 
•์•ก๋งค์ˆ˜ Error : %s' % e, send='mc') logger.error('CTrade_์ •์•ก๋งค์ˆ˜ Error : %s' % e) def ์ •๋Ÿ‰๋งค๋„(self, sRQName, ์ข…๋ชฉ์ฝ”๋“œ, ๋งค๋„๊ฐ€, ์ˆ˜๋Ÿ‰): # sRQName = '์ •๋Ÿ‰๋งค๋„%s' % self.sScreenNo try: sScreenNo = self.GenScreenNO() sAccNo = self.sAccount nOrderType = 2 # (1:์‹ ๊ทœ๋งค์ˆ˜, 2:์‹ ๊ทœ๋งค๋„ 3:๋งค์ˆ˜์ทจ์†Œ, 4:๋งค๋„์ทจ์†Œ, 5:๋งค์ˆ˜์ •์ •, 6:๋งค๋„์ •์ •) sCode = ์ข…๋ชฉ์ฝ”๋“œ nQty = ์ˆ˜๋Ÿ‰ nPrice = ๋งค๋„๊ฐ€ sHogaGb = self.๋งค๋„๋ฐฉ๋ฒ• # 00:์ง€์ •๊ฐ€, 03:์‹œ์žฅ๊ฐ€, 05:์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€, 06:์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€, 07:์ตœ์šฐ์„ ์ง€์ •๊ฐ€, 10:์ง€์ •๊ฐ€IOC, 13:์‹œ์žฅ๊ฐ€IOC, 16:์ตœ์œ ๋ฆฌIOC, 20:์ง€์ •๊ฐ€FOK, 23:์‹œ์žฅ๊ฐ€FOK, 26:์ตœ์œ ๋ฆฌFOK, 61:์žฅ๊ฐœ์‹œ์ „์‹œ๊ฐ„์™ธ, 62:์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค, 81:์‹œ๊ฐ„์™ธ์ข…๊ฐ€ if sHogaGb in ['03', '07', '06']: nPrice = 0 sOrgOrderNo = 0 ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo) return ret except Exception as e: print('[%s]์ •๋Ÿ‰๋งค๋„ Error '%(self.sName,e)) Telegram('[StockTrader][%s]์ •๋Ÿ‰๋งค๋„ Error : %s' % (self.sName, e), send='mc') logger.error('[%s]์ •๋Ÿ‰๋งค๋„ Error : %s' % (self.sName, e)) def ์ •์•ก๋งค๋„(self, sRQName, ์ข…๋ชฉ์ฝ”๋“œ, ๋งค๋„๊ฐ€, ์ˆ˜๋Ÿ‰): # sRQName = '์ •์•ก๋งค๋„%s' % self.sScreenNo sScreenNo = self.GenScreenNO() sAccNo = self.sAccount nOrderType = 2 # (1:์‹ ๊ทœ๋งค์ˆ˜, 2:์‹ ๊ทœ๋งค๋„ 3:๋งค์ˆ˜์ทจ์†Œ, 4:๋งค๋„์ทจ์†Œ, 5:๋งค์ˆ˜์ •์ •, 6:๋งค๋„์ •์ •) sCode = ์ข…๋ชฉ์ฝ”๋“œ nQty = ์ˆ˜๋Ÿ‰ nPrice = ๋งค๋„๊ฐ€ sHogaGb = self.๋งค๋„๋ฐฉ๋ฒ• # 00:์ง€์ •๊ฐ€, 03:์‹œ์žฅ๊ฐ€, 05:์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€, 06:์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€, 07:์ตœ์šฐ์„ ์ง€์ •๊ฐ€, 10:์ง€์ •๊ฐ€IOC, 13:์‹œ์žฅ๊ฐ€IOC, 16:์ตœ์œ ๋ฆฌIOC, 20:์ง€์ •๊ฐ€FOK, 23:์‹œ์žฅ๊ฐ€FOK, 26:์ตœ์œ ๋ฆฌFOK, 61:์žฅ๊ฐœ์‹œ์ „์‹œ๊ฐ„์™ธ, 62:์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค, 81:์‹œ๊ฐ„์™ธ์ข…๊ฐ€ if sHogaGb in ['03', '07', '06']: nPrice = 0 sOrgOrderNo = 0 ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo) return ret def ์ฃผ๋ฌธ๋“ฑ๋ก(self, sRQName, ์ฃผ๋ฌธ๋ฒˆํ˜ธ): self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] = sRQName Ui_๊ณ„์ขŒ์ •๋ณด์กฐํšŒ, QtBaseClass_๊ณ„์ขŒ์ •๋ณด์กฐํšŒ = uic.loadUiType("./UI/๊ณ„์ขŒ์ •๋ณด์กฐํšŒ.ui") class ํ™”๋ฉด_๊ณ„์ขŒ์ •๋ณด(QDialog, Ui_๊ณ„์ขŒ์ •๋ณด์กฐํšŒ): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_๊ณ„์ขŒ์ •๋ณด, self).__init__(parent) # Initializeํ•˜๋Š” ํ˜•์‹ self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ข…๋ชฉ๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…', 'ํ˜„์žฌ๊ฐ€', '๋ณด์œ ์ˆ˜๋Ÿ‰', '๋งค์ž…๊ฐ€', '๋งค์ž…๊ธˆ์•ก', 'ํ‰๊ฐ€๊ธˆ์•ก', '์ˆ˜์ต๋ฅ (%)', 'ํ‰๊ฐ€์†์ต', '๋งค๋งค๊ฐ€๋Šฅ์ˆ˜๋Ÿ‰'] self.๋ณด์ด๋Š”์ปฌ๋Ÿผ = ['์ข…๋ชฉ๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…', 'ํ˜„์žฌ๊ฐ€', '๋ณด์œ ์ˆ˜๋Ÿ‰', '๋งค์ž…๊ฐ€', '๋งค์ž…๊ธˆ์•ก', 'ํ‰๊ฐ€๊ธˆ์•ก', '์ˆ˜์ต๋ฅ (%)', 'ํ‰๊ฐ€์†์ต', '๋งค๋งค๊ฐ€๋Šฅ์ˆ˜๋Ÿ‰'] # ์ฃผ๋‹น ์†์ต -> ์ˆ˜์ต๋ฅ (%) self.result = [] self.KiwoomAccount() def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) def KiwoomAccount(self): ACCOUNT_CNT = self.kiwoom.dynamicCtotal_all('GetLoginInfo("ACCOUNT_CNT")') ACC_NO = self.kiwoom.dynamicCtotal_all('GetLoginInfo("ACCNO")') 
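# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original robot): GetLoginInfo("ACCNO")
# returns one semicolon-separated string with a trailing ';'
# (e.g. "8132495511;5512345678;"), which is why the statement right after this
# sketch slices the split result with [0:-1]. The helper name
# `split_account_numbers` is hypothetical and only restates that parsing rule.
def split_account_numbers(acc_no_raw):
    """Raw ACCNO string -> list of account numbers, dropping the empty tail."""
    return [acc for acc in acc_no_raw.split(';') if acc != '']

# e.g. split_account_numbers('8132495511;5512345678;') -> ['8132495511', '5512345678']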
self.account = ACC_NO.split(';')[0:-1] # ๊ณ„์ขŒ๋ฒˆํ˜ธ๊ฐ€ ;๊ฐ€ ๋ถ™์–ด์„œ ๋‚˜์˜ด(์—๋กœ ๊ณ„์ขŒ๊ฐ€ 3๊ฐœ๋ฉด 111;222;333) self.comboBox.clear() self.comboBox.addItems(self.account) logger.debug("๋ณด์œ  ๊ณ„์ขŒ์ˆ˜: %s ๊ณ„์ขŒ๋ฒˆํ˜ธ: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO)) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo): return logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % ( sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if sRQName == "๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ": cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: # print(j) S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') # print(S) if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') row.adding(S) self.result.adding(row) # logger.debug("%s" % row) if sPreNext == '2': self.Request(_repeat=2) else: self.model.umkate(KnowledgeFrame(data=self.result, columns=self.๋ณด์ด๋Š”์ปฌ๋Ÿผ)) print(self.result) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): ๊ณ„์ขŒ๋ฒˆํ˜ธ = self.comboBox.currentText().strip() logger.debug("๊ณ„์ขŒ๋ฒˆํ˜ธ %s" % ๊ณ„์ขŒ๋ฒˆํ˜ธ) # KOA StudioSA์—์„œ opw00018 ํ™•์ธ ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", ๊ณ„์ขŒ๋ฒˆํ˜ธ) # 8132495511 ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๋น„๋ฐ€๋ฒˆํ˜ธ์ž…๋ ฅ๋งค์ฒด๊ตฌ๋ถ„", '00') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์กฐํšŒ๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ", "opw00018", _repeat,'{:04d}'.formating(self.sScreenNo)) # ์กฐํšŒ ๋ฒ„ํŠผ(QtDesigner์—์„œ ์กฐํšŒ๋ฒ„ํŠผ ๋ˆ„๋ฅด๊ณ  ์˜ค๋ฅธ์ชฝ ํ•˜๋‹จ์— ์‹œ๊ทธ๋„/์Šฌ๋กฏํŽธ์ง‘๊ธฐ๋ฅผ ๋ณด๋ฉด ์กฐํšŒ๋ฒ„ํŠผ ์‹œ๊ทธ๋„(clicked), ์Šฌ๋กฏ(Inquiry())๋กœ ํ™•์ธ๊ฐ€๋Šฅํ•จ def inquiry(self): self.result = [] self.Request(_repeat=0) def robot_account(self): global ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ = self.comboBox.currentText().strip() # sqlite3 ์‚ฌ์šฉ try: with sqlite3.connect(DATABASE) as conn: cursor = conn.cursor() robot_account = pickle.dumps(๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=True) _robot_account = base64.encodebytes(robot_account) cursor.execute("REPLACE into Setting(keyword, value) values (?, ?)", ['robotaccount', _robot_account]) conn.commit() print("๋กœ๋ด‡ ๊ณ„์ขŒ ๋“ฑ๋ก ์™„๋ฃŒ") except Exception as e: print('robot_account', e) Ui_์ผ์ž๋ณ„์ฃผ๊ฐ€์กฐํšŒ, QtBaseClass_์ผ์ž๋ณ„์ฃผ๊ฐ€์กฐํšŒ = uic.loadUiType("./UI/์ผ์ž๋ณ„์ฃผ๊ฐ€์กฐํšŒ.ui") class ํ™”๋ฉด_์ผ๋ณ„์ฃผ๊ฐ€(QDialog, Ui_์ผ์ž๋ณ„์ฃผ๊ฐ€์กฐํšŒ): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_์ผ๋ณ„์ฃผ๊ฐ€, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.setWindowTitle('์ผ์ž๋ณ„ ์ฃผ๊ฐ€ ์กฐํšŒ') self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent 
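# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the TR handlers in
# these dialog classes receive fixed-width numeric strings from CommGetData and
# clean them with strip()/lstrip('0'), re-attaching the sign for negatives
# ("-000123" -> "-123"). The helper name `clean_kiwoom_number` is hypothetical;
# it only restates that inline rule in one place.
def clean_kiwoom_number(raw):
    """Fixed-width Kiwoom numeric string -> human-readable signed string."""
    s = raw.strip()
    if len(s) > 0 and s[0] == '-':
        return '-' + s[1:].lstrip('0')
    return s.lstrip('0')

# e.g. clean_kiwoom_number(' -000123') -> '-123', clean_kiwoom_number('000500') -> '500'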
self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Œ€๊ธˆ'] self.result = [] d = today self.lineEdit_date.setText(str(d)) def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo): return if sRQName == "์ฃผ์‹์ผ๋ด‰์ฐจํŠธ์กฐํšŒ": ์ข…๋ชฉ์ฝ”๋“œ = '' cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') row.adding(S) self.result.adding(row) if sPreNext == '2': QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.Request(_repeat=2)) else: kf = KnowledgeFrame(data=self.result, columns=self.columns) kf['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ self.model.umkate(kf[['์ข…๋ชฉ์ฝ”๋“œ'] + self.columns]) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): self.์ข…๋ชฉ์ฝ”๋“œ = self.lineEdit_code.text().strip() ๊ธฐ์ค€์ผ์ž = self.lineEdit_date.text().strip().replacing('-', '') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ธฐ์ค€์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ฃผ์‹์ผ๋ด‰์ฐจํŠธ์กฐํšŒ", "OPT10081", _repeat, '{:04d}'.formating(self.sScreenNo)) def inquiry(self): self.result = [] self.Request(_repeat=0) Ui_๋ถ„๋ณ„์ฃผ๊ฐ€์กฐํšŒ, QtBaseClass_๋ถ„๋ณ„์ฃผ๊ฐ€์กฐํšŒ = uic.loadUiType("./UI/๋ถ„๋ณ„์ฃผ๊ฐ€์กฐํšŒ.ui") class ํ™”๋ฉด_๋ถ„๋ณ„์ฃผ๊ฐ€(QDialog, Ui_๋ถ„๋ณ„์ฃผ๊ฐ€์กฐํšŒ): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_๋ถ„๋ณ„์ฃผ๊ฐ€, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.setWindowTitle('๋ถ„๋ณ„ ์ฃผ๊ฐ€ ์กฐํšŒ') self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ฒด๊ฒฐ์‹œ๊ฐ„', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰'] self.result = [] def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, 
str].disconnect(self.OnReceiveTrData) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) print('ํ™”๋ฉด_๋ถ„๋ณ„์ฃผ๊ฐ€ : OnReceiveTrData') if self.sScreenNo != int(sScrNo): return if sRQName == "์ฃผ์‹๋ถ„๋ด‰์ฐจํŠธ์กฐํšŒ": ์ข…๋ชฉ์ฝ”๋“œ = '' cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and (S[0] == '-' or S[0] == '+'): S = S[1:].lstrip('0') row.adding(S) self.result.adding(row) # kf = KnowledgeFrame(data=self.result, columns=self.columns) # kf.to_csv('๋ถ„๋ด‰.csv', encoding='euc-kr') if sPreNext == '2': QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.Request(_repeat=2)) else: kf = KnowledgeFrame(data=self.result, columns=self.columns) kf.to_csv('๋ถ„๋ด‰.csv', encoding='euc-kr', index=False) kf['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ self.model.umkate(kf[['์ข…๋ชฉ์ฝ”๋“œ'] + self.columns]) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): self.์ข…๋ชฉ์ฝ”๋“œ = self.lineEdit_code.text().strip() ํ‹ฑ๋ฒ”์œ„ = self.comboBox_getting_min.currentText()[0:2].strip() if ํ‹ฑ๋ฒ”์œ„[0] == '0': ํ‹ฑ๋ฒ”์œ„ = ํ‹ฑ๋ฒ”์œ„[1:] ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "ํ‹ฑ๋ฒ”์œ„", ํ‹ฑ๋ฒ”์œ„) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ฃผ์‹๋ถ„๋ด‰์ฐจํŠธ์กฐํšŒ", "OPT10080", _repeat, '{:04d}'.formating(self.sScreenNo)) def inquiry(self): self.result = [] self.Request(_repeat=0) Ui_์—…์ข…์ •๋ณด, QtBaseClass_์—…์ข…์ •๋ณด = uic.loadUiType("./UI/์—…์ข…์ •๋ณด์กฐํšŒ.ui") class ํ™”๋ฉด_์—…์ข…์ •๋ณด(QDialog, Ui_์—…์ข…์ •๋ณด): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_์—…์ข…์ •๋ณด, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.setWindowTitle('์—…์ข…์ •๋ณด ์กฐํšŒ') self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', 'ํ˜„์žฌ๊ฐ€', '๋Œ€๋น„๊ธฐํ˜ธ', '์ „์ผ๋Œ€๋น„', '๋“ฑ๋ฝ๋ฅ ', '๊ฑฐ๋ž˜๋Ÿ‰', '๋น„์ค‘', '๊ฑฐ๋ž˜๋Œ€๊ธˆ', '์ƒํ•œ', '์ƒ์Šน', '๋ณดํ•ฉ', 'ํ•˜๋ฝ', 'ํ•˜ํ•œ', '์ƒ์žฅ์ข…๋ชฉ์ˆ˜'] self.result = [] d = today self.lineEdit_date.setText(str(d)) def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def 
OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo): return if sRQName == "์—…์ข…์ •๋ณด์กฐํšŒ": ์ข…๋ชฉ์ฝ”๋“œ = '' cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') row.adding(S) self.result.adding(row) if sPreNext == '2': QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.Request(_repeat=2)) else: kf = KnowledgeFrame(data=self.result, columns=self.columns) kf['์—…์ข…์ฝ”๋“œ'] = self.์—…์ข…์ฝ”๋“œ kf.to_csv("์—…์ข…์ •๋ณด.csv") self.model.umkate(kf[['์—…์ข…์ฝ”๋“œ'] + self.columns]) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): self.์—…์ข…์ฝ”๋“œ = self.lineEdit_code.text().strip() ๊ธฐ์ค€์ผ์ž = self.lineEdit_date.text().strip().replacing('-', '') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์—…์ข…์ฝ”๋“œ", self.์—…์ข…์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์—…์ข…์ •๋ณด์กฐํšŒ", "OPT20003", _repeat, '{:04d}'.formating(self.sScreenNo)) def inquiry(self): self.result = [] self.Request(_repeat=0) Ui_์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ, QtBaseClass_์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ = uic.loadUiType("./UI/์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ.ui") class ํ™”๋ฉด_์—…์ข…๋ณ„์ฃผ๊ฐ€(QDialog, Ui_์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_์—…์ข…๋ณ„์ฃผ๊ฐ€, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.setWindowTitle('์—…์ข…๋ณ„ ์ฃผ๊ฐ€ ์กฐํšŒ') self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['ํ˜„์žฌ๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰', '์ผ์ž', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Œ€๊ธˆ', '๋Œ€์—…์ข…๊ตฌ๋ถ„', '์†Œ์—…์ข…๊ตฌ๋ถ„', '์ข…๋ชฉ์ •๋ณด', '์ˆ˜์ •์ฃผ๊ฐ€์ด๋ฒคํŠธ', '์ „์ผ์ข…๊ฐ€'] self.result = [] d = today self.lineEdit_date.setText(str(d)) def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo): return if sRQName == "์—…์ข…์ผ๋ด‰์กฐํšŒ": ์ข…๋ชฉ์ฝ”๋“œ = '' cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, 
QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') row.adding(S) self.result.adding(row) if sPreNext == '2': QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.Request(_repeat=2)) else: kf = KnowledgeFrame(data=self.result, columns=self.columns) kf['์—…์ข…์ฝ”๋“œ'] = self.์—…์ข…์ฝ”๋“œ self.model.umkate(kf[['์—…์ข…์ฝ”๋“œ'] + self.columns]) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): self.์—…์ข…์ฝ”๋“œ = self.lineEdit_code.text().strip() ๊ธฐ์ค€์ผ์ž = self.lineEdit_date.text().strip().replacing('-', '') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์—…์ข…์ฝ”๋“œ", self.์—…์ข…์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ธฐ์ค€์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์—…์ข…์ผ๋ด‰์กฐํšŒ", "OPT20006", _repeat, '{:04d}'.formating(self.sScreenNo)) def inquiry(self): self.result = [] self.Request(_repeat=0) class ํ™”๋ฉด_์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž(QDialog, Ui_์ผ์ž๋ณ„์ฃผ๊ฐ€์กฐํšŒ): def __init__(self, sScreenNo, kiwoom=None, parent=None): super(ํ™”๋ฉด_์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.setupUi(self) self.setWindowTitle('์ข…๋ชฉ๋ณ„ ํˆฌ์ž์ž ์กฐํšŒ') self.sScreenNo = sScreenNo self.kiwoom = kiwoom self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '์ „์ผ๋Œ€๋น„', '๋ˆ„์ ๊ฑฐ๋ž˜๋Œ€๊ธˆ', '๊ฐœ์ธํˆฌ์ž์ž', '์™ธ๊ตญ์ธํˆฌ์ž์ž', '๊ธฐ๊ด€๊ณ„', '๊ธˆ์œตํˆฌ์ž', '๋ณดํ—˜', 'ํˆฌ์‹ ', '๊ธฐํƒ€๊ธˆ์œต', '์€ํ–‰', '์—ฐ๊ธฐ๊ธˆ๋“ฑ', '๊ตญ๊ฐ€', '๋‚ด์™ธ๊ตญ์ธ', '์‚ฌ๋ชจํŽ€๋“œ', '๊ธฐํƒ€๋ฒ•์ธ'] self.result = [] d = today self.lineEdit_date.setText(str(d)) def KiwoomConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) def KiwoomDisConnect(self): self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) if self.sScreenNo != int(sScrNo): return if sRQName == "์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์กฐํšŒ": cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.columns: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') row.adding(S) self.result.adding(row) if sPreNext == '2': QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.Request(_repeat=2)) else: kf = KnowledgeFrame(data=self.result, columns=self.columns) kf['์ข…๋ชฉ์ฝ”๋“œ'] = self.lineEdit_code.text().strip() kf_new = kf[['์ข…๋ชฉ์ฝ”๋“œ'] + self.columns] self.model.umkate(kf_new) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) def Request(self, _repeat=0): 
์ข…๋ชฉ์ฝ”๋“œ = self.lineEdit_code.text().strip() ๊ธฐ์ค€์ผ์ž = self.lineEdit_date.text().strip().replacing('-', '') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", ์ข…๋ชฉ์ฝ”๋“œ) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๊ธˆ์•ก์ˆ˜๋Ÿ‰๊ตฌ๋ถ„", 2) # 1:๊ธˆ์•ก, 2:์ˆ˜๋Ÿ‰ ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๋งค๋งค๊ตฌ๋ถ„", 0) # 0:์ˆœ๋งค์ˆ˜, 1:๋งค์ˆ˜, 2:๋งค๋„ ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๋‹จ์œ„๊ตฌ๋ถ„", 1) # 1000:์ฒœ์ฃผ, 1:๋‹จ์ฃผ ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์กฐํšŒ", "OPT10060", _repeat, '{:04d}'.formating(self.sScreenNo)) def inquiry(self): self.result = [] self.Request(_repeat=0) Ui_TradeShortTerm, QtBaseClass_TradeShortTerm = uic.loadUiType("./UI/TradeShortTerm.ui") class ํ™”๋ฉด_TradeShortTerm(QDialog, Ui_TradeShortTerm): def __init__(self, parent): super(ํ™”๋ฉด_TradeShortTerm, self).__init__(parent) self.setupUi(self) self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.result = [] def inquiry(self): # Google spreadsheet ์‚ฌ์šฉ try: self.data = import_googlesheet() print(self.data) self.model.umkate(self.data) for i in range(length(self.data.columns)): self.tableView.resizeColumnToContents(i) except Exception as e: print('ํ™”๋ฉด_TradeShortTerm : inquiry Error ', e) logger.error('ํ™”๋ฉด_TradeShortTerm : inquiry Error : %s' % e) class CTradeShortTerm(CTrade): # ๋กœ๋ด‡ ์ถ”๊ฐ€ ์‹œ __init__ : ๋ณต์‚ฌ, Setting, ์ดˆ๊ธฐ์กฐ๊ฑด:์ „๋žต์— ๋งž๊ฒŒ, ๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ~Run:๋ณต์‚ฌ def __init__(self, sName, UUID, kiwoom=None, parent=None): self.sName = sName self.UUID = UUID self.sAccount = None self.kiwoom = kiwoom self.parent = parent self.running = False self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.portfolio = dict() self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง์ฒดํฌ = False self.Smtotal_allScreenNumber = 9999 self.d = today # ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ์‹œํŠธ์—์„œ ์ฝ์€ KnowledgeFrame์—์„œ ๋กœ๋ด‡๋ณ„ ์ข…๋ชฉ๋ฆฌ์ŠคํŠธ ์…‹ํŒ… def set_stocklist(self, data): self.Stocklist = dict() self.Stocklist['์ปฌ๋Ÿผ๋ช…'] = list(data.columns) for ์ข…๋ชฉ์ฝ”๋“œ in data['์ข…๋ชฉ์ฝ”๋“œ'].distinctive(): temp_list = data[data['์ข…๋ชฉ์ฝ”๋“œ'] == ์ข…๋ชฉ์ฝ”๋“œ].values[0] self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ] = { '๋ฒˆํ˜ธ': temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('๋ฒˆํ˜ธ')], '์ข…๋ชฉ๋ช…': temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('์ข…๋ชฉ๋ช…')], '์ข…๋ชฉ์ฝ”๋“œ': ์ข…๋ชฉ์ฝ”๋“œ, '์‹œ์žฅ': temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('์‹œ์žฅ')], 'ํˆฌ์ž๋น„์ค‘': float(temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('๋น„์ค‘')]), # ์ €์žฅ ํ›„ setting ํ•จ์ˆ˜์—์„œ ์ „๋žต์˜ ๋‹จ์œ„ํˆฌ์ž๊ธˆ์„ ๊ณฑํ•จ '์‹œ๊ฐ€์œ„์น˜': list(mapping(float, temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('์‹œ๊ฐ€์œ„์น˜')].split(','))), '๋งค์ˆ˜๊ฐ€': list( int(float(temp_list[list(data.columns).index(col)].replacing(',', ''))) for col in data.columns if '๋งค์ˆ˜๊ฐ€' in col and temp_list[list(data.columns).index(col)] != ''), '๋งค๋„์ „๋žต': temp_list[self.Stocklist['์ปฌ๋Ÿผ๋ช…'].index('๊ธฐ๋ณธ๋งค๋„์ „๋žต')], '๋งค๋„๊ฐ€': list( int(float(temp_list[list(data.columns).index(col)].replacing(',', ''))) for col in data.columns if '๋ชฉํ‘œ๊ฐ€' in col and temp_list[list(data.columns).index(col)] != '') } return self.Stocklist # RobotAdd ํ•จ์ˆ˜์—์„œ ์ดˆ๊ธฐํ™” ๋‹ค์Œ ์…‹ํŒ… ์‹คํ–‰ํ•ด์„œ ์„ค์ •๊ฐ’ 
๋„˜๊น€ def Setting(self, sScreenNo, ๋งค์ˆ˜๋ฐฉ๋ฒ•='00', ๋งค๋„๋ฐฉ๋ฒ•='03', ์ข…๋ชฉ๋ฆฌ์ŠคํŠธ=mk.KnowledgeFrame()): try: self.sScreenNo = sScreenNo self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.๋งค์ˆ˜๋ฐฉ๋ฒ• = ๋งค์ˆ˜๋ฐฉ๋ฒ• self.๋งค๋„๋ฐฉ๋ฒ• = ๋งค๋„๋ฐฉ๋ฒ• self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = ์ข…๋ชฉ๋ฆฌ์ŠคํŠธ self.Stocklist = self.set_stocklist(self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) # ๋ฒˆํ˜ธ, ์ข…๋ชฉ๋ช…, ์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์žฅ, ๋น„์ค‘, ์‹œ๊ฐ€์œ„์น˜, ๋งค์ˆ˜๊ฐ€, ๋งค๋„์ „๋žต, ๋งค๋„๊ฐ€ self.Stocklist['์ „๋žต'] = { '๋‹จ์œ„ํˆฌ์ž๊ธˆ': '', '๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ฃŒ์‹œ๊ฐ„': '', '๋ณด์œ ์ผ': '', 'ํˆฌ์ž๊ธˆ๋น„์ค‘': '', '๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด': [], '์ „๋žต๋งค๋„๊ฐ€': [], } row_data = shortterm_strategy_sheet.getting_total_all_values() for data in row_data: if data[0] == '๋‹จ์œ„ํˆฌ์ž๊ธˆ': self.Stocklist['์ „๋žต']['๋‹จ์œ„ํˆฌ์ž๊ธˆ'] = int(data[1]) elif data[0] == '๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์ข…๋ฃŒ์‹œ๊ฐ„': if length(data[1][:-3]) == 1: data[1] = '0' + data[1] self.Stocklist['์ „๋žต']['๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ฃŒ์‹œ๊ฐ„'] = data[1] + ':00' elif data[0] == '๋ณด์œ ์ผ': self.Stocklist['์ „๋žต']['๋ณด์œ ์ผ'] = int(data[1]) elif data[0] == 'ํˆฌ์ž๊ธˆ ๋น„์ค‘': self.Stocklist['์ „๋žต']['ํˆฌ์ž๊ธˆ๋น„์ค‘'] = float(data[1][:-1]) # elif data[0] == '์†์ ˆ์œจ': # self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'].adding(float(data[1][:-1])) # elif data[0] == '์‹œ๊ฐ€ ์œ„์น˜': # self.Stocklist['์ „๋žต']['์‹œ๊ฐ€์œ„์น˜'] = list(mapping(int, data[1].split(','))) elif '๊ตฌ๊ฐ„' in data[0]: if data[0][-1] != '1' and data[0][-1] != '2': self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'].adding(float(data[1][:-1])) elif '์†์ ˆ๊ฐ€' == data[0]: self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'].adding(float(data[1].replacing('%', ''))) elif '๋ณธ์ „๊ฐ€' == data[0]: self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'].adding(float(data[1].replacing('%', ''))) elif '์ต์ ˆ๊ฐ€' in data[0]: self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'].adding(float(data[1].replacing('%', ''))) self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'].insert(0, self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'][0]) # ์†์ ˆ๊ฐ€ self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'].insert(1, self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'][1]) # ๋ณธ์ „๊ฐ€ for code in self.Stocklist.keys(): if code == '์ปฌ๋Ÿผ๋ช…' or code == '์ „๋žต': continue else: self.Stocklist[code]['๋‹จ์œ„ํˆฌ์ž๊ธˆ'] = int( self.Stocklist[code]['ํˆฌ์ž๋น„์ค‘'] * self.Stocklist['์ „๋žต']['๋‹จ์œ„ํˆฌ์ž๊ธˆ']) self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] = False self.Stocklist[code]['๋งค์ˆ˜์ƒํ•œ๋„๋‹ฌ'] = False self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] = 0 self.Stocklist[code]['๋งค์ˆ˜์ด์ˆ˜๋Ÿ‰'] = 0 # ๋ถ„ํ• ๋งค์ˆ˜์— ๋”ฐ๋ฅธ ์ˆ˜๋Ÿ‰์ฒดํฌ self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] = 0 # ๋ถ„ํ• ๋งค์ˆ˜ ๋‹จ์œ„ self.Stocklist[code]['๋งค์ˆ˜์ฃผ๋ฌธ์™„๋ฃŒ'] = 0 # ๋ถ„ํ• ๋งค์ˆ˜์— ๋”ฐ๋ฅธ ๋งค์ˆ˜ ์ฃผ๋ฌธ ์ˆ˜ self.Stocklist[code]['๋งค์ˆ˜๊ฐ€์ „๋žต'] = length(self.Stocklist[code]['๋งค์ˆ˜๊ฐ€']) # ๋งค์ˆ˜ ์ „๋žต์— ๋”ฐ๋ฅธ ๋งค์ˆ˜๊ฐ€ ์ง€์ • ์ˆ˜๋Ÿ‰ if self.Stocklist[code]['๋งค๋„์ „๋žต'] == '4': self.Stocklist[code]['๋งค๋„๊ฐ€'].adding(self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€']) print(self.Stocklist) except Exception as e: print('CTradeShortTerm_Setting Error :', e) Telegram('[XTrader]CTradeShortTerm_Setting Error : %s' % e, send='mc') logger.error('CTradeShortTerm_Setting Error : %s' % e) # ์ˆ˜๋™ ํฌํŠธํด๋ฆฌ์˜ค ์ƒ์„ฑ def manual_portfolio(self): self.portfolio = dict() self.Stocklist = { '024840': {'๋ฒˆํ˜ธ': '8.030', '์ข…๋ชฉ๋ช…': 'KBI๋ฉ”ํƒˆ', '์ข…๋ชฉ์ฝ”๋“œ': '024840', '์‹œ์žฅ': 'KOSDAQ', '๋งค์ˆ˜์ „๋žต': '1', '๋งค์ˆ˜๊ฐ€': [1468], '๋งค์ˆ˜์กฐ๊ฑด': 2, '์ˆ˜๋Ÿ‰': 310, '๋งค๋„์ „๋žต': '1', '๋งค๋„๊ฐ€': [], '๋งค์ˆ˜์ผ': '2020/08/26 
09:56:54'}, '097800': {'๋ฒˆํ˜ธ': '7.099', '์ข…๋ชฉ๋ช…': '์œˆํŒฉ', '์ข…๋ชฉ์ฝ”๋“œ': '097800', '์‹œ์žฅ': 'KOSDAQ', '๋งค์ˆ˜์ „๋žต': '1', '๋งค์ˆ˜๊ฐ€': [3219], '๋งค์ˆ˜์กฐ๊ฑด': 1, '์ˆ˜๋Ÿ‰': 310, '๋งค๋„์ „๋žต': '4', '๋งค๋„๊ฐ€': [3700], '๋งค์ˆ˜์ผ': '2020/05/29 09:22:39'}, '297090': {'๋ฒˆํ˜ธ': '7.101', '์ข…๋ชฉ๋ช…': '์”จ์—์Šค๋ฒ ์–ด๋ง', '์ข…๋ชฉ์ฝ”๋“œ': '297090', '์‹œ์žฅ': 'KOSDAQ', '๋งค์ˆ˜์ „๋žต': '1', '๋งค์ˆ˜๊ฐ€': [5000], '๋งค์ˆ˜์กฐ๊ฑด': 3, '์ˆ˜๋Ÿ‰': 15, '๋งค๋„์ „๋žต': '2', '๋งค๋„๊ฐ€': [], '๋งค์ˆ˜์ผ': '2020/06/03 09:12:15'}, } self.strategy = {'์ „๋žต': {'๋‹จ์œ„ํˆฌ์ž๊ธˆ': 200000, '๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ฃŒ์‹œ๊ฐ„': '10:30:00', '๋ณด์œ ์ผ': 20, 'ํˆฌ์ž๊ธˆ๋น„์ค‘': 70.0, '๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด': [-2.7, 0.3, -3.0, -4.0, -5.0, -7.0], '์ „๋žต๋งค๋„๊ฐ€': [-2.7, 0.3, 3.0, 6.0]}} for code in list(self.Stocklist.keys()): self.portfolio[code] = CPortStock_ShortTerm(๋ฒˆํ˜ธ=self.Stocklist[code]['๋ฒˆํ˜ธ'], ์ข…๋ชฉ์ฝ”๋“œ=code, ์ข…๋ชฉ๋ช…=self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ์‹œ์žฅ=self.Stocklist[code]['์‹œ์žฅ'], ๋งค์ˆ˜๊ฐ€=self.Stocklist[code]['๋งค์ˆ˜๊ฐ€'][0], ๋งค์ˆ˜์กฐ๊ฑด=self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'], ๋ณด์œ ์ผ=self.strategy['์ „๋žต']['๋ณด์œ ์ผ'], ๋งค๋„์ „๋žต=self.Stocklist[code]['๋งค๋„์ „๋žต'], ๋งค๋„๊ฐ€=self.Stocklist[code]['๋งค๋„๊ฐ€'], ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด=self.strategy['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'], ๋งค๋„๊ตฌ๊ฐ„=1, ์ˆ˜๋Ÿ‰=self.Stocklist[code]['์ˆ˜๋Ÿ‰'], ๋งค์ˆ˜์ผ=self.Stocklist[code]['๋งค์ˆ˜์ผ']) # google spreadsheet ๋งค๋งค์ด๋ ฅ ์ƒ์„ฑ def save_history(self, code, status): # ๋งค๋งค์ด๋ ฅ sheet์— ํ•ด๋‹น ์ข…๋ชฉ(๋งค์ˆ˜๋œ ์ข…๋ชฉ)์ด ์žˆ์œผ๋ฉด row๋ฅผ ๋ฐ˜ํ™˜ ์•„๋‹ˆ๋ฉด ์˜ˆ์™ธ์ฒ˜๋ฆฌ -> ์‹ ๊ทœ ๋งค์ˆ˜๋กœ ์ฒ˜๋ฆฌ # ๋งค์ˆ˜ ์ด๋ ฅ : ์ฒด๊ฒฐ์ฒ˜๋ฆฌ, ๋งค์ˆ˜, ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ 0์—์„œ ์ด๋ ฅ ์ €์žฅ # ๋งค๋„ ์ด๋ ฅ : ์ฒด๊ฒฐ์ฒ˜๋ฆฌ, ๋งค๋„, ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ 0์—์„œ ์ด๋ ฅ ์ €์žฅ if status == '๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง': row = [] row.adding(self.portfolio[code].๋ฒˆํ˜ธ) row.adding(self.portfolio[code].์ข…๋ชฉ๋ช…) row.adding(self.portfolio[code].๋งค์ˆ˜๊ฐ€) shortterm_sell_sheet.adding_row(row) try: code_row = shortterm_history_sheet.findtotal_all(self.portfolio[code].์ข…๋ชฉ๋ช…)[-1].row # ์ข…๋ชฉ๋ช…์ด ์žˆ๋Š” ๋ชจ๋“  ์…€์„ ์ฐพ์•„์„œ ๋งจ ์•„๋ž˜์— ์žˆ๋Š” ์…€์„ ์„ ํƒ cell = alpha_list[shortterm_history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) # ๋งค์ˆ˜ ์ด๋ ฅ์— ์žˆ๋Š” ์ข…๋ชฉ์ด ๋งค๋„๊ฐ€ ๋˜์—ˆ๋Š”์ง€ ํ™•์ธ sell_price = shortterm_history_sheet.acell(str(cell)).value # ๋งค๋„ ์ด๋ ฅ์€ ์ถ”๊ฐ€ ๋งค๋„(๋งค๋„์ „๋žต2์˜ ๊ฒฝ์šฐ)๋‚˜ ์‹ ๊ทœ ๋งค๋„์ธ ๊ฒฝ์šฐ๋ผ ๋งค๋„ ์ด๋ ฅ ์œ ๋ฌด์™€ ์ƒ๊ด€์—†์Œ if status == '๋งค๋„': # ๋งค๋„ ์ด๋ ฅ์€ ํฌํŠธํด๋ฆฌ์˜ค์—์„œ ์ข…๋ชฉ pop์„ ํ•˜๋ฏ€๋กœ Stocklist ๋ฐ์ดํ„ฐ ์‚ฌ์šฉ cell = alpha_list[shortterm_history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค๋„์ฒด๊ฒฐ๊ฐ€) cell = alpha_list[shortterm_history_cols.index('๋งค๋„์ˆ˜๋Ÿ‰')] + str(code_row) ์ˆ˜๋Ÿ‰ = shortterm_history_sheet.acell(cell).value # ๋ถ„ํ•  ๋งค๋„์˜ ๊ฒฝ์šฐ ์ด์ „ ๋งค๋„ ์ˆ˜๋Ÿ‰์ด ๊ธฐ๋ก๋˜์–ด ์žˆ์Œ if ์ˆ˜๋Ÿ‰ != '': self.portfolio[code].๋งค๋„์ˆ˜๋Ÿ‰ += int(์ˆ˜๋Ÿ‰) # ๋งค๋„์ˆ˜๋Ÿ‰์€ ์ฃผ๋ฌธ ์ˆ˜๋Ÿ‰์ด๋ฏ€๋กœ ๊ธฐ์กด ์ˆ˜๋Ÿ‰์„ ํ•ฉํ•ด์คŒ shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค๋„์ˆ˜๋Ÿ‰) cell = alpha_list[shortterm_history_cols.index('๋งค๋„์ผ')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')) cell = alpha_list[shortterm_history_cols.index('๋งค๋„์ „๋žต')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค๋„์ „๋žต) cell = 
alpha_list[shortterm_history_cols.index('๋งค๋„๊ตฌ๊ฐ„')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„) ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ  = value_round((self.portfolio[code].๋งค๋„์ฒด๊ฒฐ๊ฐ€ / self.portfolio[code].๋งค์ˆ˜๊ฐ€ - 1) * 100, 2) cell = alpha_list[shortterm_history_cols.index('์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)')] + str(code_row) # ์ˆ˜์ต๋ฅ  ๊ณ„์‚ฐ shortterm_history_sheet.umkate_acell(cell, ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ ) # ๋งค์ˆ˜ ์ด๋ ฅ์€ ์žˆ์œผ๋‚˜ ๋งค๋„ ์ด๋ ฅ์ด ์—†์Œ -> ๋งค๋„ ์ „ ์ถ”๊ฐ€ ๋งค์ˆ˜ if sell_price == '': if status == '๋งค์ˆ˜': # ํฌํŠธํด๋ฆฌ์˜ค ๋ฐ์ดํ„ฐ ์‚ฌ์šฉ cell = alpha_list[shortterm_history_cols.index('๋งค์ˆ˜๊ฐ€')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค์ˆ˜๊ฐ€) cell = alpha_list[shortterm_history_cols.index('๋งค์ˆ˜์ˆ˜๋Ÿ‰')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].์ˆ˜๋Ÿ‰) cell = alpha_list[shortterm_history_cols.index('๋งค์ˆ˜์ผ')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค์ˆ˜์ผ) cell = alpha_list[shortterm_history_cols.index('๋งค์ˆ˜์กฐ๊ฑด')] + str(code_row) shortterm_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค์ˆ˜์กฐ๊ฑด) else: # ๋งค๋„๊ฐ€๊ฐ€ ๊ธฐ๋ก๋˜์–ด ๊ฑฐ๋ž˜๊ฐ€ ์™„๋ฃŒ๋œ ์ข…๋ชฉ์œผ๋กœ ํŒ๋‹จํ•˜์—ฌ ์˜ˆ์™ธ๋ฐœ์ƒ์œผ๋กœ ์‹ ๊ทœ ๋งค์ˆ˜ ์ถ”๊ฐ€ํ•จ raise Exception('๋งค๋งค์™„๋ฃŒ ์ข…๋ชฉ') except Exception as e: try: # logger.debug('CTradeShortTerm_save_history Error1 : ์ข…๋ชฉ๋ช…:%s, %s' % (self.portfolio[code].์ข…๋ชฉ๋ช…, e)) row = [] row_buy = [] if status == '๋งค์ˆ˜': row.adding(self.portfolio[code].๋ฒˆํ˜ธ) row.adding(self.portfolio[code].์ข…๋ชฉ๋ช…) row.adding(self.portfolio[code].๋งค์ˆ˜๊ฐ€) row.adding(self.portfolio[code].์ˆ˜๋Ÿ‰) row.adding(self.portfolio[code].๋งค์ˆ˜์ผ) row.adding(self.portfolio[code].๋งค์ˆ˜์กฐ๊ฑด) shortterm_history_sheet.adding_row(row) except Exception as e: print('CTradeShortTerm_save_history Error2 : ์ข…๋ชฉ๋ช…:%s, %s' % (self.portfolio[code].์ข…๋ชฉ๋ช…, e)) Telegram('[XTrade]CTradeShortTerm_save_history Error2 : ์ข…๋ชฉ๋ช…:%s, %s' % (self.portfolio[code].์ข…๋ชฉ๋ช…, e), send='mc') logger.error('CTradeShortTerm_save_history Error : ์ข…๋ชฉ๋ช…:%s, %s' % (self.portfolio[code].์ข…๋ชฉ๋ช…, e)) # ๋งค์ˆ˜ ์ „๋žต๋ณ„ ๋งค์ˆ˜ ์กฐ๊ฑด ํ™•์ธ def buy_strategy(self, code, price): result = False condition = self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] # ์ดˆ๊ธฐ๊ฐ’ 0 qty = self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] # ์ดˆ๊ธฐ๊ฐ’ 0 ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€ = price # ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] ๋งค์ˆ˜๊ฐ€ = self.Stocklist[code]['๋งค์ˆ˜๊ฐ€'] # [๋งค์ˆ˜๊ฐ€1, ๋งค์ˆ˜๊ฐ€2, ๋งค์ˆ˜๊ฐ€3] ์‹œ๊ฐ€์œ„์น˜ํ•˜ํ•œ = self.Stocklist[code]['์‹œ๊ฐ€์œ„์น˜'][0] ์‹œ๊ฐ€์œ„์น˜์ƒํ•œ = self.Stocklist[code]['์‹œ๊ฐ€์œ„์น˜'][1] # 1. 
๊ธˆ์ผ์‹œ๊ฐ€ ์œ„์น˜ ์ฒดํฌ(์ดˆ๊ธฐ ํ•œ๋ฒˆ)ํ•˜์—ฌ ๋งค์ˆ˜์กฐ๊ฑด(1~6)๊ณผ ์ฃผ๋ฌธ ์ˆ˜๋Ÿ‰ ๊ณ„์‚ฐ if self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] == False: # ์ข…๋ชฉ๋ณ„๋กœ ์ดˆ๊ธฐ์— ํ•œ๋ฒˆ๋งŒ ์‹œ๊ฐ€ ์œ„์น˜ ์ฒดํฌ๋ฅผ ํ•˜๋ฉด ๋˜๋ฏ€๋กœ ๋ณ„๋„ ํ•จ์ˆ˜ ๋ฏธ์‚ฌ์šฉ ๋งค์ˆ˜๊ฐ€.adding(์‹œ๊ฐ€) ๋งค์ˆ˜๊ฐ€.sort(reverse=True) band = ๋งค์ˆ˜๊ฐ€.index(์‹œ๊ฐ€) # band = 0 : ๋งค์ˆ˜๊ฐ€1 ์ด์ƒ, band=1: ๋งค์ˆ˜๊ฐ€1, 2 ์‚ฌ์ด, band=2: ๋งค์ˆ˜๊ฐ€2,3 ์‚ฌ์ด ๋งค์ˆ˜๊ฐ€.remove(์‹œ๊ฐ€) if band == length(๋งค์ˆ˜๊ฐ€): # ๋งค์ˆ˜๊ฐ€ ์ง€์ •ํ•œ ๊ตฌ๊ฐ„๋ณด๋‹ค ์‹œ๊ฐ€๊ฐ€ ์•„๋ž˜์ผ ๊ฒฝ์šฐ๋กœ ์ดˆ๊ธฐ๊ฐ’์ด result=False, condition=0 ๋ฆฌํ„ด self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] = True self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] = 0 self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] = 0 return False, 0, 0 else: # ๋‹จ์œ„ํˆฌ์ž๊ธˆ์œผ๋กœ ๋งค์ˆ˜๊ฐ€๋Šฅํ•œ ์ด ์ˆ˜๋Ÿ‰ ๊ณ„์‚ฐ, band = 0 : ๋งค์ˆ˜๊ฐ€1, band=1: ๋งค์ˆ˜๊ฐ€2, band=2: ๋งค์ˆ˜๊ฐ€3 ๋กœ ๊ณ„์‚ฐ self.Stocklist[code]['๋งค์ˆ˜์ด์ˆ˜๋Ÿ‰'] = self.Stocklist[code]['๋‹จ์œ„ํˆฌ์ž๊ธˆ'] // ๋งค์ˆ˜๊ฐ€[band] if band == 0: # ์‹œ๊ฐ€๊ฐ€ ๋งค์ˆ˜๊ฐ€1๋ณด๋‹ค ๋†’์€ ๊ฒฝ์šฐ # ์‹œ๊ฐ€๊ฐ€ ๋งค์ˆ˜๊ฐ€1์˜ ์‹œ๊ฐ€๋ฒ”์œ„์— ํฌํ•จ : ์กฐ๊ฑด 1, 2, 3 if ๋งค์ˆ˜๊ฐ€[band] * (1 + ์‹œ๊ฐ€์œ„์น˜ํ•˜ํ•œ / 100) <= ์‹œ๊ฐ€ and ์‹œ๊ฐ€ < ๋งค์ˆ˜๊ฐ€[band] * (1 + ์‹œ๊ฐ€์œ„์น˜์ƒํ•œ / 100): condition = length(๋งค์ˆ˜๊ฐ€) self.Stocklist[code]['๋งค์ˆ˜๊ฐ€์ „๋žต'] = length(๋งค์ˆ˜๊ฐ€) qty = self.Stocklist[code]['๋งค์ˆ˜์ด์ˆ˜๋Ÿ‰'] // condition else: # ์‹œ๊ฐ€ ์œ„์น˜์— ๋ฏธํฌํ•จ self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] = True self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] = 0 self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] = 0 return False, 0, 0 else: # ์‹œ๊ฐ€๊ฐ€ ๋งค์ˆ˜๊ฐ€ ์ค‘๊ฐ„์ธ ๊ฒฝ์šฐ - ๋งค์ˆ˜๊ฐ€1&2์‚ฌ์ด(band 1) : ์กฐ๊ฑด 4,5 / ๋งค์ˆ˜๊ฐ€2&3์‚ฌ์ด(band 2) : ์กฐ๊ฑด 6 for i in range(band): # band 1์ผ ๊ฒฝ์šฐ ๋งค์ˆ˜๊ฐ€ 1์€ ๋ถˆํ•„์š”ํ•˜์—ฌ ์‚ญ์ œ, band 2 : ๋งค์ˆ˜๊ฐ€ 1, 2 ์‚ญ์ œ(band์ˆ˜ ๋งŒํผ ์‚ญ์ œ ์‹คํ–‰) ๋งค์ˆ˜๊ฐ€.pop(0) if ๋งค์ˆ˜๊ฐ€[0] * (1 + ์‹œ๊ฐ€์œ„์น˜ํ•˜ํ•œ / 100) <= ์‹œ๊ฐ€: # ์‹œ๊ฐ€๋ฒ”์œ„ ํฌํ•จ # ์กฐ๊ฑด 4 = ๋งค์ˆ˜๊ฐ€๊ธธ์ด 1 + band 1 + 2(=band+1) -> 4 = 1 + 2*1 + 1 # ์กฐ๊ฑด 5 = ๋งค์ˆ˜๊ฐ€๊ธธ์ด 2 + band 1 + 2(=band+1) -> 5 = 2 + 2*1 + 1 # ์กฐ๊ฑด 6 = ๋งค์ˆ˜๊ฐ€๊ธธ์ด 1 + band 2 + 3(=band+1) -> 6 = 1 + 2*2 + 1 condition = length(๋งค์ˆ˜๊ฐ€) + (2 * band) + 1 self.Stocklist[code]['๋งค์ˆ˜๊ฐ€์ „๋žต'] = length(๋งค์ˆ˜๊ฐ€) qty = self.Stocklist[code]['๋งค์ˆ˜์ด์ˆ˜๋Ÿ‰'] // (condition % 2 + 1) else: self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] = True self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] = 0 self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] = 0 return False, 0, 0 self.Stocklist[code]['์‹œ๊ฐ€์ฒดํฌ'] = True self.Stocklist[code]['๋งค์ˆ˜์กฐ๊ฑด'] = condition self.Stocklist[code]['๋งค์ˆ˜์ˆ˜๋Ÿ‰'] = qty else: # ์‹œ๊ฐ€ ์œ„์น˜ ์ฒดํฌ๋ฅผ ํ•œ ๋‘๋ฒˆ์งธ ๋ฐ์ดํ„ฐ ์ดํ›„์—๋Š” condition์ด 0์ด๋ฉด ๋ฐ”๋กœ ๋งค์ˆ˜ ๋ถˆ๋งŒ์กฑ ๋ฆฌํ„ด์‹œํ‚ด if condition == 0: # condition 0์€ ๋งค์ˆ˜ ์กฐ๊ฑด ๋ถˆ๋งŒ์กฑ return False, 0, 0 # ๋งค์ˆ˜์กฐ๊ฑด ํ™•์ •, ๋งค์ˆ˜ ์ˆ˜๋Ÿ‰ ๊ณ„์‚ฐ ์™„๋ฃŒ # ๋งค์ˆ˜์ƒํ•œ์— ๋ฏธ๋„๋‹ฌํ•œ ์ƒํƒœ๋กœ ๋งค์ˆ˜๊ฐ€๋กœ ๋‚ด๋ ค์™”์„ ๋•Œ ๋งค์ˆ˜ # ํ˜„์žฌ๊ฐ€๊ฐ€ ํ•ด๋‹น์กฐ๊ฑด์—์„œ์˜ ์‹œ๊ฐ€์œ„์น˜ ์ƒํ•œ ์ด์ƒ์œผ๋กœ ์˜ค๋ฅด๋ฉด ๋งค์ˆ˜์ƒํ•œ๋„๋‹ฌ์„ True๋กœ ํ•ด์„œ ๋งค์ˆ˜ํ•˜์ง€ ์•Š๊ฒŒ ํ•จ if ํ˜„์žฌ๊ฐ€ >= ๋งค์ˆ˜๊ฐ€[0] * (1 + ์‹œ๊ฐ€์œ„์น˜์ƒํ•œ / 100): self.Stocklist[code]['๋งค์ˆ˜์ƒํ•œ๋„๋‹ฌ'] = True if self.Stocklist[code]['๋งค์ˆ˜์ฃผ๋ฌธ์™„๋ฃŒ'] < self.Stocklist[code]['๋งค์ˆ˜๊ฐ€์ „๋žต'] and self.Stocklist[code]['๋งค์ˆ˜์ƒํ•œ๋„๋‹ฌ'] == False: if ํ˜„์žฌ๊ฐ€ == ๋งค์ˆ˜๊ฐ€[0]: result = True self.Stocklist[code]['๋งค์ˆ˜์ฃผ๋ฌธ์™„๋ฃŒ'] += 1 print("๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ๋งŒ์กฑ_์ข…๋ชฉ:%s, ์‹œ๊ฐ€:%s, ์กฐ๊ฑด:%s, ํ˜„์žฌ๊ฐ€:%s, ์ฒดํฌ๊ฒฐ๊ณผ:%s, ์ˆ˜๋Ÿ‰:%s" % ( 
self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ์‹œ๊ฐ€, condition, ํ˜„์žฌ๊ฐ€, result, qty)) logger.debug("๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ๋งŒ์กฑ_์ข…๋ชฉ:%s, ์‹œ๊ฐ€:%s, ์กฐ๊ฑด:%s, ํ˜„์žฌ๊ฐ€:%s, ์ฒดํฌ๊ฒฐ๊ณผ:%s, ์ˆ˜๋Ÿ‰:%s" % ( self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ์‹œ๊ฐ€, condition, ํ˜„์žฌ๊ฐ€, result, qty)) return result, condition, qty # ๋งค๋„ ๊ตฌ๊ฐ„ ํ™•์ธ def profit_band_check(self, ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€): band_list = [0, 3, 5, 10, 15, 25] # print('ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€', ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€) ratio = value_round((ํ˜„์žฌ๊ฐ€ - ๋งค์ˆ˜๊ฐ€) / ๋งค์ˆ˜๊ฐ€ * 100, 2) # print('ratio', ratio) if ratio < 3: return 1 elif ratio in band_list: return band_list.index(ratio) + 1 else: band_list.adding(ratio) band_list.sort() band = band_list.index(ratio) band_list.remove(ratio) return band # ๋งค๋„ ์ „๋žต๋ณ„ ๋งค๋„ ์กฐ๊ฑด ํ™•์ธ def sell_strategy(self, code, price): # print('%s ๋งค๋„ ์กฐ๊ฑด ํ™•์ธ' % code) try: result = False band = self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ # ์ด์ „ ๋งค๋„ ๊ตฌ๊ฐ„ ๋ฐ›์Œ ๋งค๋„๋ฐฉ๋ฒ• = self.๋งค๋„๋ฐฉ๋ฒ• # '03' : ์‹œ์žฅ๊ฐ€ qty_ratio = 1 # ๋งค๋„ ์ˆ˜๋Ÿ‰ ๊ฒฐ์ • : ๋ณด์œ ์ˆ˜๋Ÿ‰ * qty_ratio ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€ = price # ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] ๋งค์ˆ˜๊ฐ€ = self.portfolio[code].๋งค์ˆ˜๊ฐ€ # ์ „๋žต 1, 2, 3๊ณผ 4 ๋ณ„๋„ ์ฒดํฌ strategy = self.portfolio[code].๋งค๋„์ „๋žต # ์ „๋žต 1, 2, 3 if strategy != '4': # ๋งค๋„๋ฅผ ์œ„ํ•œ ์ˆ˜์ต๋ฅ  ๊ตฌ๊ฐ„ ์ฒดํฌ(๋งค์ˆ˜๊ฐ€ ๋Œ€๋น„ ํ˜„์žฌ๊ฐ€์˜ ์ˆ˜์ต๋ฅ  ์กฐ๊ฑด์— ๋‹ค๋ฅธ ๊ตฌ๊ฐ„ ์„ค์ •) new_band = self.profit_band_check(ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€) if (hogacal(์‹œ๊ฐ€, 0, self.portfolio[code].์‹œ์žฅ, '์ƒํ•œ๊ฐ€')) <= ํ˜„์žฌ๊ฐ€: band = 7 if band < new_band: # ์ด์ „ ๊ตฌ๊ฐ„๋ณด๋‹ค ํ˜„์žฌ ๊ตฌ๊ฐ„์ด ๋†’์„ ๊ฒฝ์šฐ(์‹œ์„ธ๊ฐ€ ์˜ฌ๋ผ๊ฐ„ ๊ฒฝ์šฐ)๋งŒ band = new_band # ๊ตฌ๊ฐ„์„ ํ˜„์žฌ ๊ตฌ๊ฐ„์œผ๋กœ ๋ณ€๊ฒฝ(๋ฐ˜๋Œ€์˜ ๊ฒฝ์šฐ๋Š” ๊ตฌ๊ฐ„ ์œ ์ง€) if band == 1 and ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[0] / 100)): result = True elif band == 2 and ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[1] / 100)): result = True elif band == 3 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[2] / 100)): result = True elif band == 4 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[3] / 100)): result = True elif band == 5 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[4] / 100)): result = True elif band == 6 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[5] / 100)): result = True elif band == 7 and ํ˜„์žฌ๊ฐ€ >= (hogacal(์‹œ๊ฐ€, -3, self.Stocklist[code]['์‹œ์žฅ'], '์ƒํ•œ๊ฐ€')): ๋งค๋„๋ฐฉ๋ฒ• = '00' # ์ง€์ •๊ฐ€ result = True self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = band # ํฌํŠธํด๋ฆฌ์˜ค์— ๋งค๋„๊ตฌ๊ฐ„ ์—…๋ฐ์ดํŠธ try: if strategy == '2' or strategy == '3': # ๋งค๋„์ „๋žต 2(๊ธฐ์กด 5) if strategy == '2': ๋ชฉํ‘œ๊ฐ€ = self.portfolio[code].๋งค๋„๊ฐ€[0] elif strategy == '3': ๋ชฉํ‘œ๊ฐ€ = (hogacal(์‹œ๊ฐ€ * 1.1, 0, self.Stocklist[code]['์‹œ์žฅ'], 'ํ˜„์žฌ๊ฐ€')) ๋งค๋„์กฐ๊ฑด = self.portfolio[code].๋งค๋„์กฐ๊ฑด # ๋งค๋„๊ฐ€ ์‹คํ–‰๋œ ์กฐ๊ฑด '': ๋งค๋„ ์ „, 'B':๊ตฌ๊ฐ„๋งค๋„, 'T':๋ชฉํ‘œ๊ฐ€๋งค๋„ targetting_band = self.profit_band_check(๋ชฉํ‘œ๊ฐ€, ๋งค์ˆ˜๊ฐ€) if band < targetting_band: # ํ˜„์žฌ๊ฐ€๊ตฌ๊ฐ„์ด ๋ชฉํ‘œ๊ฐ€๊ตฌ๊ฐ„ ๋ฏธ๋งŒ์ผ๋•Œ ์ „๋Ÿ‰๋งค๋„ qty_ratio = 1 else: # ํ˜„์žฌ๊ฐ€๊ตฌ๊ฐ„์ด ๋ชฉํ‘œ๊ฐ€๊ตฌ๊ฐ„ ์ด์ƒ์ผ ๋•Œ if ํ˜„์žฌ๊ฐ€ == ๋ชฉํ‘œ๊ฐ€: # ๋ชฉํ‘œ๊ฐ€ ๋„๋‹ฌ ์‹œ ์ ˆ๋ฐ˜ ๋งค๋„ self.portfolio[code].๋ชฉํ‘œ๋„๋‹ฌ = True # ๋ชฉํ‘œ๊ฐ€ ๋„๋‹ฌ ์—ฌ๋ถ€ True if ๋งค๋„์กฐ๊ฑด == '': # ๋งค๋„์ด๋ ฅ์ด ์—†๋Š” ๊ฒฝ์šฐ ๋ชฉํ‘œ๊ฐ€๋งค๋„ 
'T', ์ ˆ๋ฐ˜ ๋งค๋„ self.portfolio[code].๋งค๋„์กฐ๊ฑด = 'T' result = True if self.portfolio[code].์ˆ˜๋Ÿ‰ == 1: qty_ratio = 1 else: qty_ratio = 0.5 elif ๋งค๋„์กฐ๊ฑด == 'B': # ๊ตฌ๊ฐ„ ๋งค๋„ ์ด๋ ฅ์ด ์žˆ์„ ๊ฒฝ์šฐ ์ ˆ๋ฐ˜๋งค๋„๊ฐ€ ๋œ ์ƒํƒœ์ด๋ฏ€๋กœ ๋‚จ์€ ์ „๋Ÿ‰๋งค๋„ result = True qty_ratio = 1 elif ๋งค๋„์กฐ๊ฑด == 'T': # ๋ชฉํ‘œ๊ฐ€ ๋งค๋„ ์ด๋ ฅ์ด ์žˆ์„ ๊ฒฝ์šฐ ๋งค๋„๋ฏธ์‹คํ–‰ result = False else: # ํ˜„์žฌ๊ฐ€๊ฐ€ ๋ชฉํ‘œ๊ฐ€๊ฐ€ ์•„๋‹ ๊ฒฝ์šฐ ๊ตฌ๊ฐ„ ๋งค๋„ ์‹คํ–‰(๋งค๋„์‹คํ–‰์—ฌ๋ถ€๋Š” ๊ฒฐ์ •๋œ ์ƒํƒœ) if self.portfolio[code].๋ชฉํ‘œ๋„๋‹ฌ == False: # ๋ชฉํ‘œ๊ฐ€ ๋„๋‹ฌ์„ ๋ชปํ•œ ๊ฒฝ์šฐ๋ฉด ์ „๋Ÿ‰๋งค๋„ qty_ratio = 1 else: if ๋งค๋„์กฐ๊ฑด == '': # ๋งค๋„์ด๋ ฅ์ด ์—†๋Š” ๊ฒฝ์šฐ ๊ตฌ๊ฐ„๋งค๋„ 'B', ์ ˆ๋ฐ˜ ๋งค๋„ self.portfolio[code].๋งค๋„์กฐ๊ฑด = 'B' if self.portfolio[code].์ˆ˜๋Ÿ‰ == 1: qty_ratio = 1 else: qty_ratio = 0.5 elif ๋งค๋„์กฐ๊ฑด == 'B': # ๊ตฌ๊ฐ„ ๋งค๋„ ์ด๋ ฅ์ด ์žˆ์„ ๊ฒฝ์šฐ ๋งค๋„๋ฏธ์‹คํ–‰ result = False elif ๋งค๋„์กฐ๊ฑด == 'T': # ๋ชฉํ‘œ๊ฐ€ ๋งค๋„ ์ด๋ ฅ์ด ์žˆ์„ ๊ฒฝ์šฐ ์ „๋Ÿ‰๋งค๋„ qty_ratio = 1 except Exception as e: print('sell_strategy ๋งค๋„์ „๋žต 2 Error :', e) logger.error('CTradeShortTerm_sell_strategy ์ข…๋ชฉ : %s ๋งค๋„์ „๋žต 2 Error : %s' % (code, e)) Telegram('[XTrader]CTradeShortTerm_sell_strategy ์ข…๋ชฉ : %s ๋งค๋„์ „๋žต 2 Error : %s' % (code, e), send='mc') result = False return ๋งค๋„๋ฐฉ๋ฒ•, result, qty_ratio # print('์ข…๋ชฉ์ฝ”๋“œ : %s, ํ˜„์žฌ๊ฐ€ : %s, ์‹œ๊ฐ€ : %s, ๊ณ ๊ฐ€ : %s, ๋งค๋„๊ตฌ๊ฐ„ : %s, ๊ฒฐ๊ณผ : %s' % (code, ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, band, result)) return ๋งค๋„๋ฐฉ๋ฒ•, result, qty_ratio # ์ „๋žต 4(์ง€์ •๊ฐ€ 00 ๋งค๋„) else: ๋งค๋„๋ฐฉ๋ฒ• = '00' # ์ง€์ •๊ฐ€ try: # ์ „๋žต 4์˜ ๋งค๋„๊ฐ€ = [๋ชฉํ‘œ๊ฐ€(์›), [์†์ ˆ๊ฐ€(%), ๋ณธ์ „๊ฐ€(%), 1์ฐจ์ต์ ˆ๊ฐ€(%), 2์ฐจ์ต์ ˆ๊ฐ€(%)]] # 1. ๋งค์ˆ˜ ํ›„ ์†์ ˆ๊ฐ€๊นŒ์ง€ ํ•˜๋ฝ์‹œ ๋งค๋„์ฃผ๋ฌธ -> ์†์ ˆ๊ฐ€, ์ „๋Ÿ‰๋งค๋„๋กœ ๋ if ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + self.portfolio[code].๋งค๋„๊ฐ€[1][0] / 100): self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 0 result = True qty_ratio = 1 # 2. 1์ฐจ์ต์ ˆ๊ฐ€ ๋„๋‹ฌ์‹œ ๋งค๋„์ฃผ๋ฌธ -> 1์ฐจ์ต์ ˆ๊ฐ€, 1/3 ๋งค๋„ elif self.portfolio[code].์ต์ ˆ๊ฐ€1๋„๋‹ฌ == False and ํ˜„์žฌ๊ฐ€ >= ๋งค์ˆ˜๊ฐ€ * ( 1 + self.portfolio[code].๋งค๋„๊ฐ€[1][2] / 100): self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 1 self.portfolio[code].์ต์ ˆ๊ฐ€1๋„๋‹ฌ = True result = True if self.portfolio[code].์ˆ˜๋Ÿ‰ == 1: qty_ratio = 1 elif self.portfolio[code].์ˆ˜๋Ÿ‰ == 2: qty_ratio = 0.5 else: qty_ratio = 0.3 # 3. 2์ฐจ์ต์ ˆ๊ฐ€ ๋„๋‹ฌ๋ชปํ•˜๊ณ  ๋ณธ์ „๊ฐ€๊นŒ์ง€ ํ•˜๋ฝ ๋˜๋Š” ๊ณ ๊ฐ€ -3%๊นŒ์ง€์‹œ ๋งค๋„์ฃผ๋ฌธ -> 1์ฐจ์ต์ ˆ๊ฐ€, ๋‚˜๋จธ์ง€ ์ „๋Ÿ‰ ๋งค๋„๋กœ ๋ elif self.portfolio[code].์ต์ ˆ๊ฐ€1๋„๋‹ฌ == True and self.portfolio[code].์ต์ ˆ๊ฐ€2๋„๋‹ฌ == False and ( (ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + self.portfolio[code].๋งค๋„๊ฐ€[1][1] / 100)) or (ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * 0.97)): self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 1.5 result = True qty_ratio = 1 # 4. 2์ฐจ ์ต์ ˆ๊ฐ€ ๋„๋‹ฌ ์‹œ ๋งค๋„์ฃผ๋ฌธ -> 2์ฐจ ์ต์ ˆ๊ฐ€, 1/3 ๋งค๋„ elif self.portfolio[code].์ต์ ˆ๊ฐ€1๋„๋‹ฌ == True and self.portfolio[code].์ต์ ˆ๊ฐ€2๋„๋‹ฌ == False and ํ˜„์žฌ๊ฐ€ >= ๋งค์ˆ˜๊ฐ€ * ( 1 + self.portfolio[code].๋งค๋„๊ฐ€[1][3] / 100): self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 2 self.portfolio[code].์ต์ ˆ๊ฐ€2๋„๋‹ฌ = True result = True if self.portfolio[code].์ˆ˜๋Ÿ‰ == 1: qty_ratio = 1 else: qty_ratio = 0.5 # 5. 
๋ชฉํ‘œ๊ฐ€ ๋„๋‹ฌ๋ชปํ•˜๊ณ  2์ฐจ์ต์ ˆ๊ฐ€๊นŒ์ง€ ํ•˜๋ฝ ์‹œ ๋งค๋„์ฃผ๋ฌธ -> 2์ฐจ์ต์ ˆ๊ฐ€, ๋‚˜๋จธ์ง€ ์ „๋Ÿ‰ ๋งค๋„๋กœ ๋ elif self.portfolio[code].์ต์ ˆ๊ฐ€2๋„๋‹ฌ == True and self.portfolio[code].๋ชฉํ‘œ๊ฐ€๋„๋‹ฌ == False and ( (ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + self.portfolio[code].๋งค๋„๊ฐ€[1][2] / 100)) or (ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * 0.97)): self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 2.5 result = True qty_ratio = 1 # 6. ๋ชฉํ‘œ๊ฐ€ ๋„๋‹ฌ ์‹œ ๋งค๋„์ฃผ๋ฌธ -> ๋ชฉํ‘œ๊ฐ€, ๋‚˜๋จธ์ง€ ์ „๋Ÿ‰ ๋งค๋„๋กœ ๋ elif self.portfolio[code].๋ชฉํ‘œ๊ฐ€๋„๋‹ฌ == False and ํ˜„์žฌ๊ฐ€ >= self.portfolio[code].๋งค๋„๊ฐ€[0]: self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 3 self.portfolio[code].๋ชฉํ‘œ๊ฐ€๋„๋‹ฌ = True result = True qty_ratio = 1 return ๋งค๋„๋ฐฉ๋ฒ•, result, qty_ratio except Exception as e: print('sell_strategy ๋งค๋„์ „๋žต 4 Error :', e) logger.error('CTradeShortTerm_sell_strategy ์ข…๋ชฉ : %s ๋งค๋„์ „๋žต 4 Error : %s' % (code, e)) Telegram('[XTrader]CTradeShortTerm_sell_strategy ์ข…๋ชฉ : %s ๋งค๋„์ „๋žต 4 Error : %s' % (code, e), send='mc') result = False return ๋งค๋„๋ฐฉ๋ฒ•, result, qty_ratio except Exception as e: print('CTradeShortTerm_sell_strategy Error ', e) Telegram('[XTrader]CTradeShortTerm_sell_strategy Error : %s' % e, send='mc') logger.error('CTradeShortTerm_sell_strategy Error : %s' % e) result = False qty_ratio = 1 return ๋งค๋„๋ฐฉ๋ฒ•, result, qty_ratio # ๋ณด์œ ์ผ ์ „๋žต : ๋ณด์œ ๊ธฐ๊ฐ„์ด ๋ณด์œ ์ผ ์ด์ƒ์ผ ๊ฒฝ์šฐ ์ „๋Ÿ‰ ๋งค๋„ ์‹คํ–‰(Mainwindow ํƒ€์ด๋จธ์—์„œ ์‹œ๊ฐ„ ์ฒดํฌ) def hold_strategy(self): if self.holdcheck == True: print('๋ณด์œ ์ผ ๋งŒ๊ธฐ ๋งค๋„ ์ฒดํฌ') try: for code in list(self.portfolio.keys()): ๋ณด์œ ๊ธฐ๊ฐ„ = holdingcal(self.portfolio[code].๋งค์ˆ˜์ผ) print('์ข…๋ชฉ๋ช… : %s, ๋ณด์œ ์ผ : %s, ๋ณด์œ ๊ธฐ๊ฐ„ : %s' % (self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].๋ณด์œ ์ผ, ๋ณด์œ ๊ธฐ๊ฐ„)) if ๋ณด์œ ๊ธฐ๊ฐ„ >= int(self.portfolio[code].๋ณด์œ ์ผ) and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('S_%s' % code) is None and \ self.portfolio[code].์ˆ˜๋Ÿ‰ != 0: self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 0 (result, order) = self.์ •๋Ÿ‰๋งค๋„(sRQName='S_%s' % code, ์ข…๋ชฉ์ฝ”๋“œ=code, ๋งค๋„๊ฐ€=self.portfolio[code].๋งค์ˆ˜๊ฐ€, ์ˆ˜๋Ÿ‰=self.portfolio[code].์ˆ˜๋Ÿ‰) if result == True: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['S_%s' % code] = True Telegram('[XTrader]์ •๋Ÿ‰๋งค๋„(๋ณด์œ ์ผ๋งŒ๊ธฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % ( code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) logger.info('์ •๋Ÿ‰๋งค๋„(๋ณด์œ ์ผ๋งŒ๊ธฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % ( code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) else: Telegram('[XTrader]์ •์•ก๋งค๋„์‹คํŒจ(๋ณด์œ ์ผ๋งŒ๊ธฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % ( code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) logger.info('์ •๋Ÿ‰๋งค๋„์‹คํŒจ(๋ณด์œ ์ผ๋งŒ๊ธฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % ( code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) except Exception as e: print("hold_strategy Error :", e) # ํฌํŠธํด๋ฆฌ์˜ค ์ƒ์„ฑ def set_portfolio(self, code, buyprice, condition): try: self.portfolio[code] = CPortStock_ShortTerm(๋ฒˆํ˜ธ=self.Stocklist[code]['๋ฒˆํ˜ธ'], ์ข…๋ชฉ์ฝ”๋“œ=code, ์ข…๋ชฉ๋ช…=self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ์‹œ์žฅ=self.Stocklist[code]['์‹œ์žฅ'], ๋งค์ˆ˜๊ฐ€=buyprice, ๋งค์ˆ˜์กฐ๊ฑด=condition, ๋ณด์œ ์ผ=self.Stocklist['์ „๋žต']['๋ณด์œ ์ผ'], ๋งค๋„์ „๋žต=self.Stocklist[code]['๋งค๋„์ „๋žต'], ๋งค๋„๊ฐ€=self.Stocklist[code]['๋งค๋„๊ฐ€'], ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด=self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'], ๋งค์ˆ˜์ผ=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')) 
self.Stocklist[code]['๋งค์ˆ˜์ผ'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') # ๋งค๋งค์ด๋ ฅ ์—…๋ฐ์ดํŠธ๋ฅผ ์œ„ํ•ด ๋งค์ˆ˜์ผ ์ถ”๊ฐ€ except Exception as e: print('CTradeShortTerm_set_portfolio Error ', e) Telegram('[XTrader]CTradeShortTerm_set_portfolio Error : %s' % e, send='mc') logger.error('CTradeShortTerm_set_portfolio Error : %s' % e) # Robot_Run์ด ๋˜๋ฉด ์‹คํ–‰๋จ - ๋งค์ˆ˜/๋งค๋„ ์ข…๋ชฉ์„ ๋ฆฌ์ŠคํŠธ๋กœ ์ €์žฅ def ์ดˆ๊ธฐ์กฐ๊ฑด(self, codes): # ๋งค์ˆ˜์ด์•ก ๊ณ„์‚ฐํ•˜๊ธฐ # ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ ๋ฆฌ์ŠคํŠธ ๋ณ€์ˆ˜ ์ดˆ๊ธฐํ™” # ๋งค๋„ํ• ์ข…๋ชฉ : ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋˜ ์ข…๋ชฉ ์ถ”๊ฐ€ # ๋งค์ˆ˜ํ• ์ข…๋ชฉ : ๊ตฌ๊ธ€์—์„œ ๋ฐ›์€ ์ข…๋ชฉ ์ถ”๊ฐ€ self.parent.statusbar.showMessage("[%s] ์ดˆ๊ธฐ์กฐ๊ฑด์ค€๋น„" % (self.sName)) self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ = [] # ์žฅ ๋งˆ๊ฐ ํ›„ ๊ธˆ์ผ ๋งค๋„ํ•œ ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ๋งค๋งค์ด๋ ฅ ์ •๋ฆฌ ์—…๋ฐ์ดํŠธ(๋งค๋„๊ฐ€, ์†์ต๋ฅ  ๋“ฑ) self.๋งค๋„ํ• ์ข…๋ชฉ = [] self.๋งค์ˆ˜ํ• ์ข…๋ชฉ = [] self.๋งค์ˆ˜์ด์•ก = 0 self.holdcheck = False for code in codes: # ๊ตฌ๊ธ€ ์‹œํŠธ์—์„œ import๋œ ๋งค์ˆ˜ ๋ชจ๋‹ˆ์ปค๋ง ์ข…๋ชฉ์€ '๋งค์ˆ˜ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.adding(code) # ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋Š” ์ข…๋ชฉ์€ ๋งค๋„ ๊ด€๋ จ ์ „๋žต ์žฌํ™•์ธ(๊ตฌ๊ธ€์‹œํŠธ) ๋ฐ '๋งค๋„ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ if length(self.portfolio) > 0: row_data = shortterm_sell_sheet.getting_total_all_values() idx_holding = row_data[0].index('๋ณด์œ ์ผ') idx_strategy = row_data[0].index('๋งค๋„์ „๋žต') idx_loss = row_data[0].index('์†์ ˆ๊ฐ€') idx_sellprice = row_data[0].index('๋ชฉํ‘œ๊ฐ€') for row in row_data[1:]: code, name, market = getting_code(row[1]) # ์ข…๋ชฉ๋ช…์œผ๋กœ ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ ๋ฐ›์•„์„œ(getting_code ํ•จ์ˆ˜) ์ถ”๊ฐ€ if code in list(self.portfolio.keys()): self.portfolio[code].๋ณด์œ ์ผ = row[idx_holding] self.portfolio[code].๋งค๋„์ „๋žต = row[idx_strategy] self.portfolio[code].๋งค๋„๊ฐ€ = [] # ๋งค๋„ ์ „๋žต ๋ณ€๊ฒฝ์— ๋”ฐ๋ผ ๋งค๋„๊ฐ€ ์ดˆ๊ธฐํ™” # ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด = [์†์ ˆ๊ฐ€(%), ๋ณธ์ „๊ฐ€(%), ๊ตฌ๊ฐ„3 ๊ณ ๊ฐ€๋Œ€๋น„(%), ๊ตฌ๊ฐ„4 ๊ณ ๊ฐ€๋Œ€๋น„(%), ๊ตฌ๊ฐ„5 ๊ณ ๊ฐ€๋Œ€๋น„(%), ๊ตฌ๊ฐ„6 ๊ณ ๊ฐ€๋Œ€๋น„(%)] self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด = [] self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด.adding(value_round(((int(float(row[idx_loss].replacing(',', ''))) / self.portfolio[code].๋งค์ˆ˜๊ฐ€) - 1) * 100, 1)) # ์†์ ˆ๊ฐ€๋ฅผ ํผ์„ผํŠธ๋กœ ๋ณ€ํ™˜ํ•˜์—ฌ ์—…๋ฐ์ดํŠธ for idx in range(1, length(self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'])): # Stocklist์˜ ๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด ์ „์ฒด๋ฅผ ๋ฐ”๋กœ addingํ•  ๊ฒฝ์šฐ ๋ชจ๋“  ์ข…๋ชฉ์ด ๋™์ผํ•œ ๊ฐ’์œผ๋กœ ๋“ค์–ด๊ฐ self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด.adding(self.Stocklist['์ „๋žต']['๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด'][idx]) if self.portfolio[code].๋งค๋„์ „๋žต == '4': # ๋งค๋„๊ฐ€ = [๋ชฉํ‘œ๊ฐ€(์›), [์†์ ˆ๊ฐ€(%), ๋ณธ์ „๊ฐ€(%), 1์ฐจ์ต์ ˆ๊ฐ€(%), 2์ฐจ์ต์ ˆ๊ฐ€(%)]] self.portfolio[code].๋งค๋„๊ฐ€.adding(int(float(row[idx_sellprice].replacing(',', '')))) self.portfolio[code].๋งค๋„๊ฐ€.adding([]) for idx in range(length(self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'])): # Stocklist์˜ ์ „๋žต๋งค๋„๊ฐ€ ์ „์ฒด๋ฅผ ๋ฐ”๋กœ addingํ•  ๊ฒฝ์šฐ ๋ชจ๋“  ์ข…๋ชฉ์ด ๋™์ผํ•œ ๊ฐ’์œผ๋กœ ๋“ค์–ด๊ฐ self.portfolio[code].๋งค๋„๊ฐ€[1].adding(self.Stocklist['์ „๋žต']['์ „๋žต๋งค๋„๊ฐ€'][idx]) self.portfolio[code].๋งค๋„๊ฐ€[1][0] = self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[0] # float(row[idx_loss].replacing('%', '')) self.portfolio[code].sellcount = 0 self.portfolio[code].๋งค๋„๋‹จ์œ„์ˆ˜๋Ÿ‰ = 0 # ์ „๋žต4์˜ ๊ธฐ๋ณธ ๋งค๋„ ๋‹จ์œ„๋Š” ๋ณด์œ ์ˆ˜๋Ÿ‰์˜ 1/3 self.portfolio[code].์ต์ ˆ๊ฐ€1๋„๋‹ฌ = False self.portfolio[code].์ต์ ˆ๊ฐ€2๋„๋‹ฌ = False self.portfolio[code].๋ชฉํ‘œ๊ฐ€๋„๋‹ฌ = False 
else: if self.portfolio[code].๋งค๋„์ „๋žต == '2' or self.portfolio[code].๋งค๋„์ „๋žต == '3': self.portfolio[code].๋ชฉํ‘œ๋„๋‹ฌ = False # ๋ชฉํ‘œ๊ฐ€(๋งค๋„๊ฐ€) ๋„๋‹ฌ ์ฒดํฌ(False ์ƒํƒœ๋กœ ๊ตฌ๊ฐ„ ์ปท์ผ๊ฒฝ์šฐ ์ „๋Ÿ‰ ๋งค๋„) self.portfolio[code].๋งค๋„์กฐ๊ฑด = '' # ๊ตฌ๊ฐ„๋งค๋„ : B, ๋ชฉํ‘œ๋งค๋„ : T for port_code in list(self.portfolio.keys()): # ๋กœ๋ด‡ ์‹œ์ž‘ ์‹œ ํฌํŠธํด๋ฆฌ์˜ค ์ข…๋ชฉ์˜ ๋งค๋„๊ตฌ๊ฐ„(์ „์ผ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง)์„ 1๋กœ ์ดˆ๊ธฐํ™” # ๊ตฌ๊ฐ„์ด ๋‚ด๋ ค๊ฐ€๋Š” ๊ฑด ๋ฐ˜์˜ํ•˜์ง€ ์•Š์œผ๋ฏ€๋กœ ์ดˆ๊ธฐํ™”๋ฅผ ์‹œ์ผœ์„œ ๋‹ค์‹œ ๊ตฌ๊ฐ„ ์ฒดํฌ ์‹œ์ž‘ํ•˜๊ธฐ ์œ„ํ•จ self.portfolio[port_code].๋งค๋„๊ตฌ๊ฐ„ = 1 # ๋งค๋„ ๊ตฌ๊ฐ„์€ ๋กœ๋ด‡ ์‹คํ–‰ ์‹œ ๋งˆ๋‹ค ์ดˆ๊ธฐํ™”์‹œํ‚ด # ๋งค์ˆ˜์ด์•ก๊ณ„์‚ฐ self.๋งค์ˆ˜์ด์•ก += (self.portfolio[port_code].๋งค์ˆ˜๊ฐ€ * self.portfolio[port_code].์ˆ˜๋Ÿ‰) # ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋Š” ์ข…๋ชฉ์ด ๊ตฌ๊ธ€์—์„œ ๋ฐ›์•„์„œ ๋งŒ๋“  Stocklist์— ์—†์„ ๊ฒฝ์šฐ๋งŒ ์ถ”๊ฐ€ํ•จ # ์ด ์กฐ๊ฑด์ด ์—†์„ ๊ฒฝ์šฐ ๊ตฌ๊ธ€์—์„œ ๋ฐ›์€ ์ „๋žต๋“ค์ด ์•„๋‹Œ ๊ณผ๊ฑฐ ์ „๋žต์ด ํฌํŠธํด๋ฆฌ์˜ค์—์„œ ๋„˜์–ด๊ฐ # ๊ทผ๋ฐ ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋Š” ์ข…๋ชฉ์„ ์™œ Stocklist์— ๋„ฃ์–ด์•ผ๋˜๋Š”์ง€ ๋ชจ๋ฅด๊ฒ ์Œ(๋‚ด๊ฐ€ ํ•˜๊ณ ๋„...) if port_code not in list(self.Stocklist.keys()): self.Stocklist[port_code] = { '๋ฒˆํ˜ธ': self.portfolio[port_code].๋ฒˆํ˜ธ, '์ข…๋ชฉ๋ช…': self.portfolio[port_code].์ข…๋ชฉ๋ช…, '์ข…๋ชฉ์ฝ”๋“œ': self.portfolio[port_code].์ข…๋ชฉ์ฝ”๋“œ, '์‹œ์žฅ': self.portfolio[port_code].์‹œ์žฅ, '๋งค์ˆ˜์กฐ๊ฑด': self.portfolio[port_code].๋งค์ˆ˜์กฐ๊ฑด, '๋งค์ˆ˜๊ฐ€': self.portfolio[port_code].๋งค์ˆ˜๊ฐ€, '๋งค๋„์ „๋žต': self.portfolio[port_code].๋งค๋„์ „๋žต, '๋งค๋„๊ฐ€': self.portfolio[port_code].๋งค๋„๊ฐ€ } self.๋งค๋„ํ• ์ข…๋ชฉ.adding(port_code) # for stock in kf_keeplist['์ข…๋ชฉ๋ฒˆํ˜ธ'].values: # ๋ณด์œ  ์ข…๋ชฉ ์ฒดํฌํ•ด์„œ ๋งค๋„ ์ข…๋ชฉ์— ์ถ”๊ฐ€ โ†’ ๋กœ๋ด‡์ด ๋‘๊ฐœ ์ด์ƒ์ผ ๊ฒฝ์šฐ ์ค‘๋ณต๋˜๋ฏ€๋กœ ๋ฏธ์ ์šฉ # self.๋งค๋„ํ• ์ข…๋ชฉ.adding(stock) # ์ข…๋ชฉ๋ช… = kf_keeplist[kf_keeplist['์ข…๋ชฉ๋ฒˆํ˜ธ']==stock]['์ข…๋ชฉ๋ช…'].values[0] # ๋งค์ž…๊ฐ€ = kf_keeplist[kf_keeplist['์ข…๋ชฉ๋ฒˆํ˜ธ']==stock]['๋งค์ž…๊ฐ€'].values[0] # ๋ณด์œ ์ˆ˜๋Ÿ‰ = kf_keeplist[kf_keeplist['์ข…๋ชฉ๋ฒˆํ˜ธ']==stock]['๋ณด์œ ์ˆ˜๋Ÿ‰'].values[0] # print('์ข…๋ชฉ์ฝ”๋“œ : %s, ์ข…๋ชฉ๋ช… : %s, ๋งค์ž…๊ฐ€ : %s, ๋ณด์œ ์ˆ˜๋Ÿ‰ : %s' %(stock, ์ข…๋ชฉ๋ช…, ๋งค์ž…๊ฐ€, ๋ณด์œ ์ˆ˜๋Ÿ‰)) # self.portfolio[stock] = CPortStock_ShortTerm(์ข…๋ชฉ์ฝ”๋“œ=stock, ์ข…๋ชฉ๋ช…=์ข…๋ชฉ๋ช…, ๋งค์ˆ˜๊ฐ€=๋งค์ž…๊ฐ€, ์ˆ˜๋Ÿ‰=๋ณด์œ ์ˆ˜๋Ÿ‰, ๋งค์ˆ˜์ผ='') def ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ(self, param): try: if self.running == True: ์ฒด๊ฒฐ์‹œ๊ฐ„ = '%s %s:%s:%s' % (str(self.d), param['์ฒด๊ฒฐ์‹œ๊ฐ„'][0:2], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][2:4], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][4:]) ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ํ˜„์žฌ๊ฐ€ = abs(int(float(param['ํ˜„์žฌ๊ฐ€']))) ์ „์ผ๋Œ€๋น„ = int(float(param['์ „์ผ๋Œ€๋น„'])) ๋“ฑ๋ฝ๋ฅ  = float(param['๋“ฑ๋ฝ๋ฅ ']) ๋งค๋„ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค๋„ํ˜ธ๊ฐ€']))) ๋งค์ˆ˜ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค์ˆ˜ํ˜ธ๊ฐ€']))) ๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰ = abs(int(float(param['๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰']))) ์‹œ๊ฐ€ = abs(int(float(param['์‹œ๊ฐ€']))) ๊ณ ๊ฐ€ = abs(int(float(param['๊ณ ๊ฐ€']))) ์ €๊ฐ€ = abs(int(float(param['์ €๊ฐ€']))) ๊ฑฐ๋ž˜ํšŒ์ „์œจ = abs(float(param['๊ฑฐ๋ž˜ํšŒ์ „์œจ'])) ์‹œ๊ฐ€์ด์•ก = abs(int(float(param['์‹œ๊ฐ€์ด์•ก']))) ์ข…๋ชฉ๋ช… = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][1] # pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก] ์ „์ผ์ข…๊ฐ€ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][3] ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„)) 
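# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the real-time fields
# parsed above arrive as signed strings such as '+1500' or '-1500' (the sign
# marks direction against the previous close), so prices are normalised with
# abs(int(float(...))). `to_price` is a hypothetical helper restating that
# conversion.
def to_price(raw):
    """Signed Kiwoom real-time price string -> absolute integer price."""
    return abs(int(float(raw)))

# e.g. to_price('-1500') == 1500 and to_price('+1500') == 1500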
self.wr.writerow([์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„]) # ๋งค์ˆ˜ ์กฐ๊ฑด # ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์ข…๋ฃŒ ์‹œ๊ฐ„ ํ™•์ธ if current_time < self.Stocklist['์ „๋žต']['๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ฃŒ์‹œ๊ฐ„']: if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ and ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: # ๋งค์ˆ˜์ด์•ก + ์ข…๋ชฉ๋‹จ์œ„ํˆฌ์ž๊ธˆ์ด ํˆฌ์ž์ด์•ก๋ณด๋‹ค ์ž‘์Œ and ๋งค์ˆ˜์ฃผ๋ฌธ์‹คํ–‰์ค‘Lock์— ์—†์Œ -> ์ถ”๊ฐ€๋งค์ˆ˜๋ฅผ ์œ„ํ•ด์„œ and ํฌํŠธํด๋ฆฌ์˜ค์— ์—†์Œ ์กฐ๊ฑด ์‚ญ์ œ if (self.๋งค์ˆ˜์ด์•ก + self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋‹จ์œ„ํˆฌ์ž๊ธˆ'] < self.ํˆฌ์ž์ด์•ก) and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting( 'B_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None and length( self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋งค์ˆ˜๊ฐ€']) > 0: # and self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) is None # ๋งค์ˆ˜ ์ „๋žต๋ณ„ ๋ชจ๋‹ˆํ„ฐ๋ง ์ฒดํฌ buy_check, condition, qty = self.buy_strategy(์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์„ธ) if buy_check == True and (self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋‹จ์œ„ํˆฌ์ž๊ธˆ'] // ํ˜„์žฌ๊ฐ€ > 0): (result, order) = self.์ •๋Ÿ‰๋งค์ˆ˜(sRQName='B_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค์ˆ˜๊ฐ€=ํ˜„์žฌ๊ฐ€, ์ˆ˜๋Ÿ‰=qty) if result == True: if self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) is None: # ํฌํŠธํด๋ฆฌ์˜ค์— ์—†์œผ๋ฉด ์‹ ๊ทœ ์ €์žฅ self.set_portfolio(์ข…๋ชฉ์ฝ”๋“œ, ํ˜„์žฌ๊ฐ€, condition) self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['B_%s' % ์ข…๋ชฉ์ฝ”๋“œ] = True Telegram('[XTrader]๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์กฐ๊ฑด=%s, ๋งค์ˆ˜์ˆ˜๋Ÿ‰=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, condition, qty)) logger.info('๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์กฐ๊ฑด=%s, ๋งค์ˆ˜์ˆ˜๋Ÿ‰=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, condition, qty)) else: Telegram('[XTrader]๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์กฐ๊ฑด=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, condition)) logger.info('๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์กฐ๊ฑด=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, condition)) else: if self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง์ฒดํฌ == False: for code in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ: if self.portfolio.getting(code) is not None and code not in self.๋งค๋„ํ• ์ข…๋ชฉ: Telegram('[XTrader]๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง๋งˆ๊ฐ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์ „ํ™˜' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…)) logger.info('๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง๋งˆ๊ฐ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์ „ํ™˜' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…)) self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.remove(code) self.๋งค๋„ํ• ์ข…๋ชฉ.adding(code) self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง์ฒดํฌ = True logger.info('๋งค๋„ํ•  ์ข…๋ชฉ :%s' % self.๋งค๋„ํ• ์ข…๋ชฉ) # ๋งค๋„ ์กฐ๊ฑด if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค๋„ํ• ์ข…๋ชฉ: # ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ์Œ and ๋งค๋„์ฃผ๋ฌธ์‹คํ–‰์ค‘Lock์— ์—†์Œ and ๋งค์ˆ˜์ฃผ๋ฌธ์‹คํ–‰์ค‘Lock์— ์—†์Œ if self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) is not None and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting( 'S_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None: # and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('B_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None: # ๋งค๋„ ์ „๋žต๋ณ„ ๋ชจ๋‹ˆํ„ฐ๋ง ์ฒดํฌ ๋งค๋„๋ฐฉ๋ฒ•, sell_check, ratio = self.sell_strategy(์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์„ธ) if sell_check == True: if ๋งค๋„๋ฐฉ๋ฒ• == '00': (result, order) = self.์ •์•ก๋งค๋„(sRQName='S_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค๋„๊ฐ€=ํ˜„์žฌ๊ฐ€, ์ˆ˜๋Ÿ‰=value_round(self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio)) else: (result, order) = self.์ •๋Ÿ‰๋งค๋„(sRQName='S_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค๋„๊ฐ€=ํ˜„์žฌ๊ฐ€, ์ˆ˜๋Ÿ‰=value_round(self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio)) if result == True: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['S_%s' % ์ข…๋ชฉ์ฝ”๋“œ] = True Telegram('[XTrader]๋งค๋„์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, 
๋งค๋„๊ฐ€=%s, ๋งค๋„์ „๋žต=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ์ˆ˜๋Ÿ‰=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, int(self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio))) if self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต == '2': logger.info( '๋งค๋„์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ๋งค๋„์ „๋žต=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ๋ชฉํ‘œ๋„๋‹ฌ=%s, ๋งค๋„์กฐ๊ฑด=%s, ์ˆ˜๋Ÿ‰=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋ชฉํ‘œ๋„๋‹ฌ, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์กฐ๊ฑด, int(self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio))) else: logger.info('๋งค๋„์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ๋งค๋„์ „๋žต=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ์ˆ˜๋Ÿ‰=%s' % ( ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, int(self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio))) else: Telegram( '[XTrader]๋งค๋„์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ๋งค๋„์ „๋žต=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ์ˆ˜๋Ÿ‰=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio)) logger.info('๋งค๋„์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ๋งค๋„์ „๋žต=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ์ˆ˜๋Ÿ‰=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ „๋žต, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, self.portfolio[ ์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰ * ratio)) except Exception as e: print('CTradeShortTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e)) Telegram('[XTrader]CTradeShortTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e), send='mc') logger.error('CTradeShortTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error :%s, %s' % (์ข…๋ชฉ๋ช…, e)) def ์ ‘์ˆ˜์ฒ˜๋ฆฌ(self, param): pass def ์ฒด๊ฒฐ์ฒ˜๋ฆฌ(self, param): ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ์ฃผ๋ฌธ๋ฒˆํ˜ธ = param['์ฃผ๋ฌธ๋ฒˆํ˜ธ'] self.์ฃผ๋ฌธ๊ฒฐ๊ณผ[์ฃผ๋ฌธ๋ฒˆํ˜ธ] = param ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ = int(param['์ฃผ๋ฌธ์ˆ˜๋Ÿ‰']) ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ = int(param['๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰']) ์ฒด๊ฒฐ๊ฐ€ = int(0 if (param['์ฒด๊ฒฐ๊ฐ€'] is None or param['์ฒด๊ฒฐ๊ฐ€'] == '') else param['์ฒด๊ฒฐ๊ฐ€']) # ๋งค์ž…๊ฐ€ ๋™์ผ ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ = int(0 if (param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] is None or param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] == '') else param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰']) ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ = int(0 if (param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] is None or param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] == '') else param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ']) ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ = int(0 if (param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] is None or param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] == '') else param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ']) # ๋งค์ˆ˜ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '2': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค์ˆ˜๊ฐ€ = int(์ฃผ๋ฌธ[2:]) # ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€ = int(0 if (param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€'] is None or param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€'] == '') else param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€']) # logger.debug('๋งค์ˆ˜-------> %s %s %s %s %s' % (param['์ข…๋ชฉ์ฝ”๋“œ'], param['์ข…๋ชฉ๋ช…'], ๋งค์ˆ˜๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ - ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰, ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰)) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] P.๋งค์ˆ˜๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ # ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€ P.์ˆ˜๋Ÿ‰ += ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ # ์ถ”๊ฐ€ ๋งค์ˆ˜ ๋Œ€๋น„ํ•ด์„œ ๊ธฐ์กด ์ˆ˜๋Ÿ‰์— ์ฒด๊ฒฐ๋œ ์ˆ˜๋Ÿ‰ ๊ณ„์† ๋”ํ•จ(์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ - ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰) P.๋งค์ˆ˜์ผ = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') else: 
logger.error('ERROR ํฌํŠธ์— ์ข…๋ชฉ์ด ์—†์Œ !!!!') if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: try: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) if self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋งค์ˆ˜์ฃผ๋ฌธ์™„๋ฃŒ'] >= self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋งค์ˆ˜๊ฐ€์ „๋žต']: self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) Telegram('[XTrader]๋ถ„ํ•  ๋งค์ˆ˜ ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ข…๋ชฉ์ฝ”๋“œ:%s ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, ์ข…๋ชฉ์ฝ”๋“œ, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) logger.info('๋ถ„ํ•  ๋งค์ˆ˜ ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ข…๋ชฉ์ฝ”๋“œ:%s ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, ์ข…๋ชฉ์ฝ”๋“œ, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['์ˆ˜๋Ÿ‰'] = P.์ˆ˜๋Ÿ‰ self.Stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋งค์ˆ˜๊ฐ€'].pop(0) self.๋งค์ˆ˜์ด์•ก += (P.๋งค์ˆ˜๊ฐ€ * P.์ˆ˜๋Ÿ‰) logger.debug('์ฒด๊ฒฐ์ฒ˜๋ฆฌ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜์ด์•ก๊ณ„์‚ฐ์™„๋ฃŒ:%s' % (P.์ข…๋ชฉ๋ช…, self.๋งค์ˆ˜์ด์•ก)) self.save_history(์ข…๋ชฉ์ฝ”๋“œ, status='๋งค์ˆ˜') Telegram('[XTrader]๋งค์ˆ˜์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) logger.info('๋งค์ˆ˜์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) except Exception as e: Telegram('[XTrader]์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ ์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s, %s ' % (P.์ข…๋ชฉ๋ช…, e), send='mc') logger.error('์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ ์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s, %s ' % (P.์ข…๋ชฉ๋ช…, e)) # ๋งค๋„ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '1': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค๋„๊ฐ€ = int(์ฃผ๋ฌธ[2:]) try: if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ฒด๊ฒฐ๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ˆ˜๋Ÿ‰ = ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ self.save_history(์ข…๋ชฉ์ฝ”๋“œ, status='๋งค๋„') Telegram('[XTrader]๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰)) logger.info('๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰)) except Exception as e: Telegram('[XTrader]์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ Error : %s' % e, send='mc') logger.error('์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ Error : %s' % e) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() def ์ž”๊ณ ์ฒ˜๋ฆฌ(self, param): # print('CTradeShortTerm : ์ž”๊ณ ์ฒ˜๋ฆฌ') ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.๋งค์ˆ˜๊ฐ€ = int(0 if (param['๋งค์ž…๋‹จ๊ฐ€'] is None or param['๋งค์ž…๋‹จ๊ฐ€'] == '') else param['๋งค์ž…๋‹จ๊ฐ€']) P.์ˆ˜๋Ÿ‰ = int(0 if (param['๋ณด์œ ์ˆ˜๋Ÿ‰'] is None or param['๋ณด์œ ์ˆ˜๋Ÿ‰'] == '') else param['๋ณด์œ ์ˆ˜๋Ÿ‰']) if P.์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) if ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) logger.info('์ž”๊ณ ์ฒ˜๋ฆฌ_ํฌํŠธํด๋ฆฌ์˜คPOP %s ' % ์ข…๋ชฉ์ฝ”๋“œ) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() def Run(self, flag=True, sAccount=None): self.running = flag ret = 0 # self.manual_portfolio() for code in list(self.portfolio.keys()): print(self.portfolio[code].__dict__) logger.info(self.portfolio[code].__dict__) if flag == True: print("%s ROBOT ์‹คํ–‰" % (self.sName)) try: Telegram("[XTrader]%s ROBOT ์‹คํ–‰" % (self.sName)) self.sAccount = sAccount self.ํˆฌ์ž์ด์•ก = floor(int(d2deposit.replacing(",", "")) * (self.Stocklist['์ „๋žต']['ํˆฌ์ž๊ธˆ๋น„์ค‘'] / 100)) print('๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ : ', ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ) print('D+2 ์˜ˆ์ˆ˜๊ธˆ : ', 
int(d2deposit.replacing(",", ""))) print('ํˆฌ์ž ์ด์•ก : ', self.ํˆฌ์ž์ด์•ก) print('Stocklist : ', self.Stocklist) # self.์ตœ๋Œ€ํฌํŠธ์ˆ˜ = floor(int(d2deposit.replacing(",", "")) / self.๋‹จ์œ„ํˆฌ์ž๊ธˆ / length(self.parent.robots)) # print(self.์ตœ๋Œ€ํฌํŠธ์ˆ˜) self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() codes = list(self.Stocklist.keys()) codes.remove('์ „๋žต') codes.remove('์ปฌ๋Ÿผ๋ช…') self.์ดˆ๊ธฐ์กฐ๊ฑด(codes) print("๋งค๋„ : ", self.๋งค๋„ํ• ์ข…๋ชฉ) print("๋งค์ˆ˜ : ", self.๋งค์ˆ˜ํ• ์ข…๋ชฉ) print("๋งค์ˆ˜์ด์•ก : ", self.๋งค์ˆ˜์ด์•ก) print("ํฌํŠธํด๋ฆฌ์˜ค ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์ˆ˜์ •") for code in list(self.portfolio.keys()): print(self.portfolio[code].__dict__) logger.info(self.portfolio[code].__dict__) self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = self.๋งค๋„ํ• ์ข…๋ชฉ + self.๋งค์ˆ˜ํ• ์ข…๋ชฉ logger.info("์˜ค๋Š˜ ๊ฑฐ๋ž˜ ์ข…๋ชฉ : %s %s" % (self.sName, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';')) self.KiwoomConnect() # MainWindow ์™ธ์—์„œ ํ‚ค์›€ API๊ตฌ๋™์‹œ์ผœ์„œ ์ž์ฒด์ ์œผ๋กœ API๋ฐ์ดํ„ฐ์†ก์ˆ˜์‹ ๊ฐ€๋Šฅํ•˜๋„๋ก ํ•จ if length(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: self.f = open('data_result.csv', 'a', newline='') self.wr = csv.writer(self.f) self.wr.writerow(['์ฒด๊ฒฐ์‹œ๊ฐ„', '์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', 'ํ˜„์žฌ๊ฐ€', '์ „์ผ๋Œ€๋น„']) ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') logger.debug("์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s" % ret) except Exception as e: print('CTradeShortTerm_Run Error :', e) Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc') logger.error('CTradeShortTerm_Run Error : %s' % e) else: Telegram("[XTrader]%s ROBOT ์‹คํ–‰ ์ค‘์ง€" % (self.sName)) print('Stocklist : ', self.Stocklist) ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL') self.f.close() del self.f del self.wr if self.portfolio is not None: # ๊ตฌ๊ธ€ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ๊ธฐ์กด ์ข…๋ชฉ ์‚ญ์ œ num_data = shortterm_sell_sheet.getting_total_all_values() for i in range(length(num_data)): shortterm_sell_sheet.delete_rows(2) for code in list(self.portfolio.keys()): # ๋งค์ˆ˜ ๋ฏธ์ฒด๊ฒฐ ์ข…๋ชฉ ์‚ญ์ œ if self.portfolio[code].์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(code) else: # ํฌํŠธํด๋ฆฌ์˜ค ์ข…๋ชฉ์€ ๊ตฌ๊ธ€ ๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ์— ์ถ”๊ฐ€ํ•˜์—ฌ ์ „๋žต ์ˆ˜์ •๊ฐ€๋Šฅ self.save_history(code, status='๋งค๋„๋ชจ๋‹ˆํ„ฐ๋ง') if length(self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) > 0: try: Telegram("[XTrader]%s ๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload : %s" % (self.sName, self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ)) logger.info("%s ๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload : %s" % (self.sName, self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ)) self.parent.statusbar.showMessage("๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload") self.DailyProfit(self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) except Exception as e: print('%s ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ ๊ฒฐ๊ณผ ์—…๋กœ๋“œ Error : %s' % (self.sName, e)) fintotal_ally: del self.DailyProfitLoop # ๊ธˆ์ผ๋งค๋„๊ฒฐ๊ณผ ์—…๋ฐ์ดํŠธ ์‹œ QEventLoop ์‚ฌ์šฉ์œผ๋กœ ๋กœ๋ด‡ ์ €์žฅ ์‹œ pickcle ์—๋Ÿฌ ๋ฐœ์ƒํ•˜์—ฌ ์‚ญ์ œ์‹œํ‚ด self.KiwoomDisConnect() # ๋กœ๋ด‡ ํด๋ž˜์Šค ๋‚ด์—์„œ ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ๋ฐ์ดํ„ฐ๋ฅผ ๋ฐ›๊ณ ๋‚˜์„œ ์—ฐ๊ฒฐ ํ•ด์ œ์‹œํ‚ด # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() # ์žฅ๊ธฐ ํˆฌ์ž์šฉ : ํ˜„์žฌ ๋ฏธ๋ฆฌ ์„ ์ •ํ•œ ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ๋กœ๋ด‡ ์‹œ์ž‘๊ณผ ๋™์‹œ์— ๋งค์ˆ˜ ์‹คํ–‰ ์ ์šฉ class CTradeLongTerm(CTrade): # ๋กœ๋ด‡ ์ถ”๊ฐ€ ์‹œ __init__ : ๋ณต์‚ฌ, Setting, ์ดˆ๊ธฐ์กฐ๊ฑด:์ „๋žต์— ๋งž๊ฒŒ, ๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ~Run:๋ณต์‚ฌ def __init__(self, sName, UUID, kiwoom=None, parent=None): self.sName = sName self.UUID = UUID self.sAccount = None self.kiwoom = kiwoom self.parent = parent 
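        # Robot lifecycle (same pattern as CTradeShortTerm above): __init__ -> Setting() with the
        # screen number and order methods -> Run(True), which calls ์ดˆ๊ธฐ์กฐ๊ฑด() and registers the
        # watch list via KiwoomSetRealReg(); after that the Kiwoom callbacks drive
        # ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ / ์ฒด๊ฒฐ์ฒ˜๋ฆฌ / ์ž”๊ณ ์ฒ˜๋ฆฌ until Run(False) unregisters and disconnects.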
self.running = False self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.portfolio = dict() self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.Smtotal_allScreenNumber = 9999 self.d = today # RobotAdd ํ•จ์ˆ˜์—์„œ ์ดˆ๊ธฐํ™” ๋‹ค์Œ ์…‹ํŒ… ์‹คํ–‰ํ•ด์„œ ์„ค์ •๊ฐ’ ๋„˜๊น€ def Setting(self, sScreenNo, ๋งค์ˆ˜๋ฐฉ๋ฒ•='03', ๋งค๋„๋ฐฉ๋ฒ•='03', ์ข…๋ชฉ๋ฆฌ์ŠคํŠธ=[]): self.sScreenNo = sScreenNo self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.๋งค์ˆ˜๋ฐฉ๋ฒ• = ๋งค์ˆ˜๋ฐฉ๋ฒ• self.๋งค๋„๋ฐฉ๋ฒ• = ๋งค๋„๋ฐฉ๋ฒ• # Robot_Run์ด ๋˜๋ฉด ์‹คํ–‰๋จ - ๋งค์ˆ˜/๋งค๋„ ์ข…๋ชฉ์„ ๋ฆฌ์ŠคํŠธ๋กœ ์ €์žฅ def ์ดˆ๊ธฐ์กฐ๊ฑด(self): # ๋งค์ˆ˜์ด์•ก ๊ณ„์‚ฐํ•˜๊ธฐ # ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ ๋ฆฌ์ŠคํŠธ ๋ณ€์ˆ˜ ์ดˆ๊ธฐํ™” # ๋งค๋„ํ• ์ข…๋ชฉ : ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋˜ ์ข…๋ชฉ ์ถ”๊ฐ€ # ๋งค์ˆ˜ํ• ์ข…๋ชฉ : ๊ตฌ๊ธ€์—์„œ ๋ฐ›์€ ์ข…๋ชฉ ์ถ”๊ฐ€ self.parent.statusbar.showMessage("[%s] ์ดˆ๊ธฐ์กฐ๊ฑด์ค€๋น„" % (self.sName)) self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ = [] # ์žฅ ๋งˆ๊ฐ ํ›„ ๊ธˆ์ผ ๋งค๋„ํ•œ ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ๋งค๋งค์ด๋ ฅ ์ •๋ฆฌ ์—…๋ฐ์ดํŠธ(๋งค๋„๊ฐ€, ์†์ต๋ฅ  ๋“ฑ) self.๋งค๋„ํ• ์ข…๋ชฉ = [] self.๋งค์ˆ˜ํ• ์ข…๋ชฉ = [] self.Stocklist = dict() kf = mk.read_csv('๋งค์ˆ˜์ข…๋ชฉ.csv', encoding='euc-kr') codes= kf['์ข…๋ชฉ'].to_list() qtys = kf['์ˆ˜๋Ÿ‰'].to_list() for ์ข…๋ชฉ์ฝ”๋“œ, ์ˆ˜๋Ÿ‰ in zip(codes, qtys): code, name, market = getting_code(์ข…๋ชฉ์ฝ”๋“œ) self.Stocklist[code] = { '์ข…๋ชฉ๋ช…' : name, '์ข…๋ชฉ์ฝ”๋“œ' : code, '์‹œ์žฅ๊ตฌ๋ถ„' : market, '๋งค์ˆ˜์ˆ˜๋Ÿ‰' : ์ˆ˜๋Ÿ‰ } self.๋งค์ˆ˜ํ• ์ข…๋ชฉ = list(self.Stocklist.keys()) # ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋Š” ์ข…๋ชฉ์€ ๋งค๋„ ๊ด€๋ จ ์ „๋žต ์žฌํ™•์ธ(๊ตฌ๊ธ€์‹œํŠธ) ๋ฐ '๋งค๋„ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ if length(self.portfolio) > 0: for port_code in list(self.portfolio.keys()): self.๋งค๋„ํ• ์ข…๋ชฉ.adding(port_code) def ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ(self, param): try: if self.running == True: ์ฒด๊ฒฐ์‹œ๊ฐ„ = '%s %s:%s:%s' % (str(self.d), param['์ฒด๊ฒฐ์‹œ๊ฐ„'][0:2], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][2:4], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][4:]) ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ํ˜„์žฌ๊ฐ€ = abs(int(float(param['ํ˜„์žฌ๊ฐ€']))) ์ „์ผ๋Œ€๋น„ = int(float(param['์ „์ผ๋Œ€๋น„'])) ๋“ฑ๋ฝ๋ฅ  = float(param['๋“ฑ๋ฝ๋ฅ ']) ๋งค๋„ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค๋„ํ˜ธ๊ฐ€']))) ๋งค์ˆ˜ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค์ˆ˜ํ˜ธ๊ฐ€']))) ๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰ = abs(int(float(param['๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰']))) ์‹œ๊ฐ€ = abs(int(float(param['์‹œ๊ฐ€']))) ๊ณ ๊ฐ€ = abs(int(float(param['๊ณ ๊ฐ€']))) ์ €๊ฐ€ = abs(int(float(param['์ €๊ฐ€']))) ๊ฑฐ๋ž˜ํšŒ์ „์œจ = abs(float(param['๊ฑฐ๋ž˜ํšŒ์ „์œจ'])) ์‹œ๊ฐ€์ด์•ก = abs(int(float(param['์‹œ๊ฐ€์ด์•ก']))) ์ข…๋ชฉ๋ช… = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][1] # pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก] ์‹œ์žฅ๊ตฌ๋ถ„ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][0] ์ „์ผ์ข…๊ฐ€ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][3] ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„)) # ๋งค์ˆ˜ ์กฐ๊ฑด # ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์ข…๋ฃŒ ์‹œ๊ฐ„ ํ™•์ธ if current_time >= "09:00:00": if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ and ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('B_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None: (result, order) = self.์ •๋Ÿ‰๋งค์ˆ˜(sRQName='B_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค์ˆ˜๊ฐ€=ํ˜„์žฌ๊ฐ€, ์ˆ˜๋Ÿ‰=self.์ˆ˜๋Ÿ‰[0]) if result == True: self.portfolio[์ข…๋ชฉ์ฝ”๋“œ] = CPortStock_LongTerm(์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…=์ข…๋ชฉ๋ช…, ์‹œ์žฅ=์‹œ์žฅ๊ตฌ๋ถ„, ๋งค์ˆ˜๊ฐ€=ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜์ผ=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')) 
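                            # Duplicate-order guard: orders are keyed 'B_<์ข…๋ชฉ์ฝ”๋“œ>' / 'S_<์ข…๋ชฉ์ฝ”๋“œ>' in
                            # self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock, so later ticks skip a stock while its order is still
                            # pending, and ์ฒด๊ฒฐ์ฒ˜๋ฆฌ() pops the key once ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ reaches 0.
                            # Minimal illustration of the guard (hypothetical code '005930'):
                            #   if self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('B_005930') is None:
                            #       # ...send the order..., then:
                            #       self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['B_005930'] = True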
self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['B_%s' % ์ข…๋ชฉ์ฝ”๋“œ] = True Telegram('[StockTrader]๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์ˆ˜๋Ÿ‰=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.์ˆ˜๋Ÿ‰[0])) logger.info('๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s, ๋งค์ˆ˜์ˆ˜๋Ÿ‰=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.์ˆ˜๋Ÿ‰[0])) else: Telegram('[StockTrader]๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€)) logger.info('๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€)) # ๋งค๋„ ์กฐ๊ฑด if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค๋„ํ• ์ข…๋ชฉ: pass except Exception as e: print('CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e)) Telegram('[StockTrader]CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e), send='mc') logger.error('CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error :%s, %s' % (์ข…๋ชฉ๋ช…, e)) def ์ ‘์ˆ˜์ฒ˜๋ฆฌ(self, param): pass def ์ฒด๊ฒฐ์ฒ˜๋ฆฌ(self, param): ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ์ฃผ๋ฌธ๋ฒˆํ˜ธ = param['์ฃผ๋ฌธ๋ฒˆํ˜ธ'] self.์ฃผ๋ฌธ๊ฒฐ๊ณผ[์ฃผ๋ฌธ๋ฒˆํ˜ธ] = param ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ = int(param['์ฃผ๋ฌธ์ˆ˜๋Ÿ‰']) ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ = int(param['๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰']) ์ฒด๊ฒฐ๊ฐ€ = int(0 if (param['์ฒด๊ฒฐ๊ฐ€'] is None or param['์ฒด๊ฒฐ๊ฐ€'] == '') else param['์ฒด๊ฒฐ๊ฐ€']) # ๋งค์ž…๊ฐ€ ๋™์ผ ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ = int(0 if (param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] is None or param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] == '') else param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰']) ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ = int(0 if (param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] is None or param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] == '') else param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ']) ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ = int(0 if (param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] is None or param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] == '') else param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ']) # ๋งค์ˆ˜ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '2': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค์ˆ˜๊ฐ€ = int(์ฃผ๋ฌธ[2:]) # ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€ = int(0 if (param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€'] is None or param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€'] == '') else param['๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€']) # logger.debug('๋งค์ˆ˜-------> %s %s %s %s %s' % (param['์ข…๋ชฉ์ฝ”๋“œ'], param['์ข…๋ชฉ๋ช…'], ๋งค์ˆ˜๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ - ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰, ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰)) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] P.๋งค์ˆ˜๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ # ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€ P.์ˆ˜๋Ÿ‰ += ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ # ์ถ”๊ฐ€ ๋งค์ˆ˜ ๋Œ€๋น„ํ•ด์„œ ๊ธฐ์กด ์ˆ˜๋Ÿ‰์— ์ฒด๊ฒฐ๋œ ์ˆ˜๋Ÿ‰ ๊ณ„์† ๋”ํ•จ(์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ - ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰) P.๋งค์ˆ˜์ผ = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') else: logger.error('ERROR ํฌํŠธ์— ์ข…๋ชฉ์ด ์—†์Œ !!!!') if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: try: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) Telegram('[StockTrader]๋งค์ˆ˜์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) logger.info('๋งค์ˆ˜์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (P.์ข…๋ชฉ๋ช…, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰)) except Exception as e: Telegram('[XTrader]์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ ์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s, %s ' % (P.์ข…๋ชฉ๋ช…, e), send='mc') logger.error('์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ ์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s, %s ' % (P.์ข…๋ชฉ๋ช…, e)) # ๋งค๋„ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '1': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค๋„๊ฐ€ = int(์ฃผ๋ฌธ[2:]) try: if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not 
None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ฒด๊ฒฐ๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„์ˆ˜๋Ÿ‰ = ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ Telegram('[StockTrader]๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰)) logger.info('๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰)) except Exception as e: Telegram('[StockTrader]์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ Error : %s' % e, send='mc') logger.error('์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ Error : %s' % e) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() def ์ž”๊ณ ์ฒ˜๋ฆฌ(self, param): # print('CTradeShortTerm : ์ž”๊ณ ์ฒ˜๋ฆฌ') ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.๋งค์ˆ˜๊ฐ€ = int(0 if (param['๋งค์ž…๋‹จ๊ฐ€'] is None or param['๋งค์ž…๋‹จ๊ฐ€'] == '') else param['๋งค์ž…๋‹จ๊ฐ€']) P.์ˆ˜๋Ÿ‰ = int(0 if (param['๋ณด์œ ์ˆ˜๋Ÿ‰'] is None or param['๋ณด์œ ์ˆ˜๋Ÿ‰'] == '') else param['๋ณด์œ ์ˆ˜๋Ÿ‰']) if P.์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) if ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) logger.info('์ž”๊ณ ์ฒ˜๋ฆฌ_ํฌํŠธํด๋ฆฌ์˜คPOP %s ' % ์ข…๋ชฉ์ฝ”๋“œ) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() def Run(self, flag=True, sAccount=None): self.running = flag ret = 0 # self.manual_portfolio() # for code in list(self.portfolio.keys()): # print(self.portfolio[code].__dict__) # logger.info(self.portfolio[code].__dict__) if flag == True: print("%s ROBOT ์‹คํ–‰" % (self.sName)) try: Telegram("[StockTrader]%s ROBOT ์‹คํ–‰" % (self.sName)) self.sAccount = sAccount self.ํˆฌ์ž์ด์•ก = floor(int(d2deposit.replacing(",", "")) / length(self.parent.robots)) print('๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ : ', ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ) print('D+2 ์˜ˆ์ˆ˜๊ธˆ : ', int(d2deposit.replacing(",", ""))) print('ํˆฌ์ž ์ด์•ก : ', self.ํˆฌ์ž์ด์•ก) # self.์ตœ๋Œ€ํฌํŠธ์ˆ˜ = floor(int(d2deposit.replacing(",", "")) / self.๋‹จ์œ„ํˆฌ์ž๊ธˆ / length(self.parent.robots)) # print(self.์ตœ๋Œ€ํฌํŠธ์ˆ˜) self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.์ดˆ๊ธฐ์กฐ๊ฑด() print("๋งค๋„ : ", self.๋งค๋„ํ• ์ข…๋ชฉ) print("๋งค์ˆ˜ : ", self.๋งค์ˆ˜ํ• ์ข…๋ชฉ) self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = self.๋งค๋„ํ• ์ข…๋ชฉ + self.๋งค์ˆ˜ํ• ์ข…๋ชฉ logger.info("์˜ค๋Š˜ ๊ฑฐ๋ž˜ ์ข…๋ชฉ : %s %s" % (self.sName, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';')) self.KiwoomConnect() # MainWindow ์™ธ์—์„œ ํ‚ค์›€ API๊ตฌ๋™์‹œ์ผœ์„œ ์ž์ฒด์ ์œผ๋กœ API๋ฐ์ดํ„ฐ์†ก์ˆ˜์‹ ๊ฐ€๋Šฅํ•˜๋„๋ก ํ•จ if length(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') logger.debug("[%s]์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s" % (self.sName, ret)) except Exception as e: print('CTradeShortTerm_Run Error :', e) Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc') logger.error('CTradeShortTerm_Run Error : %s' % e) else: Telegram("[StockTrader]%s ROBOT ์‹คํ–‰ ์ค‘์ง€" % (self.sName)) ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL') if self.portfolio is not None: for code in list(self.portfolio.keys()): # ๋งค์ˆ˜ ๋ฏธ์ฒด๊ฒฐ ์ข…๋ชฉ ์‚ญ์ œ if self.portfolio[code].์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(code) self.KiwoomDisConnect() # ๋กœ๋ด‡ ํด๋ž˜์Šค ๋‚ด์—์„œ ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ๋ฐ์ดํ„ฐ๋ฅผ ๋ฐ›๊ณ ๋‚˜์„œ ์—ฐ๊ฒฐ ํ•ด์ œ์‹œํ‚ด # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() Ui_TradeCondition, QtBaseClass_TradeCondition = uic.loadUiType("./UI/TradeCondition.ui") class 
ํ™”๋ฉด_TradeCondition(QDialog, Ui_TradeCondition): # def __init__(self, parent): def __init__(self, sScreenNo, kiwoom=None, parent=None): # super(ํ™”๋ฉด_TradeCondition, self).__init__(parent) # self.setAttribute(Qt.WA_DeleteOnClose) # ์œ„์ ฏ์ด ๋‹ซํž๋•Œ ๋‚ด์šฉ ์‚ญ์ œํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์ฐฝ์ด ๋‹ซํž๋•Œ ์ •๋ณด๋ฅผ ์ €์žฅํ•ด์•ผ๋˜๋Š” ๋กœ๋ด‡ ์„ธํŒ… ์‹œ์—๋Š” ์“ฐ๋ฉด ์—๋Ÿฌ๋‚จ!! self.setupUi(self) # print("ํ™”๋ฉด_TradeCondition : __init__") self.sScreenNo = sScreenNo self.kiwoom = kiwoom # self.parent = parent self.progressBar.setValue(0) # Progressbar ์ดˆ๊ธฐ ์…‹ํŒ… self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…'] self.result = [] self.KiwoomConnect() self.GetCondition() # ๋งค์ˆ˜ ์ข…๋ชฉ ์„ ์ •์„ ์œ„ํ•œ ์ฒดํฌ ํ•จ์ˆ˜ def pick_stock(self, data): row = [] cnt = 0 for code in data['์ข…๋ชฉ์ฝ”๋“œ']: url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code) response = requests.getting(url) soup = BeautifulSoup(response.text, 'html.parser') tag = soup.find_total_all("td", {"class": "num"}) # tag = soup.find_total_all("span") result = [] temp = [] for i in tag: temp.adding(i.text.replacing('\t', '').replacing('\n', '')) result.adding(code) # ์ข…๋ชฉ์ฝ”๋“œ result.adding(int(temp[5].replacing(',',''))) # ์ „์ผ์ข…๊ฐ€ # result.adding(temp[7]) # ์‹œ๊ฐ€ # result.adding(temp[11]) # ์ €๊ฐ€ # result.adding(temp[9]) # ๊ณ ๊ฐ€ result.adding(int(temp[0].replacing(',',''))) # ์ข…๊ฐ€(ํ˜„์žฌ๊ฐ€) # result.adding(temp[6]) # ๊ฑฐ๋ž˜๋Ÿ‰ row.adding(result) cnt+=1 # Progress Bar ๋””์Šคํ”Œ๋ ˆ์ด(์ „์ฒด ์‹œ๊ฐ„ ๋Œ€๋น„ ๋น„์œจ) self.progressBar.setValue(cnt / length(data) * 100) kf = mk.KnowledgeFrame(data=row, columns=['์ข…๋ชฉ์ฝ”๋“œ', '์ „์ผ์ข…๊ฐ€', '์ข…๊ฐ€']) kf_final = mk.unioner(data, kf, on='์ข…๋ชฉ์ฝ”๋“œ') kf_final = kf_final.reseting_index(sip=True) kf_final['๋“ฑ๋ฝ๋ฅ '] = value_round((kf_final['์ข…๊ฐ€'] - kf_final['์ „์ผ์ข…๊ฐ€'])/kf_final['์ „์ผ์ข…๊ฐ€'] * 100, 1) kf_final = kf_final[kf_final['๋“ฑ๋ฝ๋ฅ '] >= 1][['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', '๋“ฑ๋ฝ๋ฅ ']] kf_final = kf_final.reseting_index(sip=True) print(kf_final) return kf_final # ์ €์žฅ๋œ ์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ๋ชฉ๋ก ์ฝ์Œ def GetCondition(self): # 1. ์ €์žฅ๋œ ์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ๋ชฉ๋ก ๋ถˆ๋Ÿฌ์˜ด GetCondition # 2. ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ gettingConditionLoad # 3. ๋ชฉ๋ก ์š”์ฒญ ์‘๋‹ต ์ด๋ฒคํŠธ OnReceiveConditionVer์—์„œ # gettingConditionNameList๋กœ ๋ชฉ๋ก์„ ๋”•์…”๋„ˆ๋ฆฌ๋กœ self.condition์— ๋ฐ›์Œ # 4. 
GetCondition์—์„œ self.condition์„ ์ •๋ฆฌํ•ด์„œ ์ฝค๋ณด๋ฐ•์Šค์— ๋ชฉ๋ก ์ถ”๊ฐ€ํ•จ try: # print("ํ™”๋ฉด_TradeCondition : GetCondition") self.gettingConditionLoad() self.kf_condition = KnowledgeFrame() self.idx = [] self.conName = [] for index in self.condition.keys(): # condition์€ dictionary # print(self.condition) self.idx.adding(str(index)) self.conName.adding(self.condition[index]) # self.sendCondition("0156", self.condition[index], index, 1) self.kf_condition['Index'] = self.idx self.kf_condition['Name'] = self.conName self.kf_condition['Table'] = ">> ์กฐ๊ฑด์‹ " + self.kf_condition['Index'] + " : " + self.kf_condition['Name'] self.kf_condition['Index'] = self.kf_condition['Index'].totype(int) self.kf_condition = self.kf_condition.sort_the_values(by='Index').reseting_index(sip=True) # ์ถ”๊ฐ€ print(self.kf_condition) # ์ถ”๊ฐ€ self.comboBox_condition.clear() self.comboBox_condition.addItems(self.kf_condition['Table'].values) except Exception as e: print("GetCondition_Error") print(e) # ์กฐ๊ฑด๊ฒ€์ƒ‰ ํ•ด๋‹น ์ข…๋ชฉ ์š”์ฒญ ๋ฉ”์„œ๋“œ def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime): # print("ํ™”๋ฉด_TradeCondition : sendCondition") """ ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ ๋ฉ”์„œ๋“œ ์ด ๋ฉ”์„œ๋“œ๋กœ ์–ป๊ณ ์ž ํ•˜๋Š” ๊ฒƒ์€ ํ•ด๋‹น ์กฐ๊ฑด์— ๋งž๋Š” ์ข…๋ชฉ์ฝ”๋“œ์ด๋‹ค. ํ•ด๋‹น ์ข…๋ชฉ์— ๋Œ€ํ•œ ์ƒ์„ธ์ •๋ณด๋Š” setRealReg() ๋ฉ”์„œ๋“œ๋กœ ์š”์ฒญํ•  ์ˆ˜ ์žˆ๋‹ค. ์š”์ฒญ์ด ์‹คํŒจํ•˜๋Š” ๊ฒฝ์šฐ๋Š”, ํ•ด๋‹น ์กฐ๊ฑด์‹์ด ์—†๊ฑฐ๋‚˜, ์กฐ๊ฑด๋ช…๊ณผ ์ธ๋ฑ์Šค๊ฐ€ ๋งž์ง€ ์•Š๊ฑฐ๋‚˜, ์กฐํšŒ ํšŸ์ˆ˜๋ฅผ ์ดˆ๊ณผํ•˜๋Š” ๊ฒฝ์šฐ ๋ฐœ์ƒํ•œ๋‹ค. ์กฐ๊ฑด๊ฒ€์ƒ‰์— ๋Œ€ํ•œ ๊ฒฐ๊ณผ๋Š” 1ํšŒ์„ฑ ์กฐํšŒ์˜ ๊ฒฝ์šฐ, receiveTrCondition() ์ด๋ฒคํŠธ๋กœ ๊ฒฐ๊ณผ๊ฐ’์ด ์ „๋‹ฌ๋˜๋ฉฐ ์‹ค์‹œ๊ฐ„ ์กฐํšŒ์˜ ๊ฒฝ์šฐ, receiveTrCondition()๊ณผ receiveRealCondition() ์ด๋ฒคํŠธ๋กœ ๊ฒฐ๊ณผ๊ฐ’์ด ์ „๋‹ฌ๋œ๋‹ค. 
:param screenNo: string :param conditionName: string - ์กฐ๊ฑด์‹ ์ด๋ฆ„ :param conditionIndex: int - ์กฐ๊ฑด์‹ ์ธ๋ฑ์Šค :param isRealTime: int - ์กฐ๊ฑด๊ฒ€์ƒ‰ ์กฐํšŒ๊ตฌ๋ถ„(0: 1ํšŒ์„ฑ ์กฐํšŒ, 1: ์‹ค์‹œ๊ฐ„ ์กฐํšŒ) """ isRequest = self.kiwoom.dynamicCtotal_all("SendCondition(QString, QString, int, int", screenNo, conditionName, conditionIndex, isRealTime) # OnReceiveTrCondition() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ์กฐ๊ฑด ๊ฒ€์ƒ‰ ๊ด€๋ จ ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ์™€ ๋ถ™์ž„(์ฝœ๋ฐฑ) def KiwoomConnect(self): # print("ํ™”๋ฉด_TradeCondition : KiwoomConnect") self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) # ์กฐ๊ฑด ๊ฒ€์ƒ‰ ๊ด€๋ จ ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ ์—ฐ๊ฒฐ ํ•ด์ œ def KiwoomDisConnect(self): # print("ํ™”๋ฉด_TradeCondition : KiwoomDisConnect") self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition) # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ ๋ฉ”์„œ๋“œ def gettingConditionLoad(self): """ ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ ๋ฉ”์„œ๋“œ """ # print("ํ™”๋ฉด_TradeCondition : gettingConditionLoad") self.kiwoom.dynamicCtotal_all("GetConditionLoad()") # OnReceiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ํš๋“ ๋ฉ”์„œ๋“œ(์กฐ๊ฑด์‹ ๋ชฉ๋ก์„ ๋”•์…”๋„ˆ๋ฆฌ๋กœ ๋ฆฌํ„ด) def gettingConditionNameList(self): """ ์กฐ๊ฑด์‹ ํš๋“ ๋ฉ”์„œ๋“œ ์กฐ๊ฑด์‹์„ ๋”•์…”๋„ˆ๋ฆฌ ํ˜•ํƒœ๋กœ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ์ด ๋ฉ”์„œ๋“œ๋Š” ๋ฐ˜๋“œ์‹œ receiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์•ˆ์—์„œ ์‚ฌ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 
:return: dict - {์ธ๋ฑ์Šค:์กฐ๊ฑด๋ช…, ์ธ๋ฑ์Šค:์กฐ๊ฑด๋ช…, ...} """ # print("ํ™”๋ฉด_TradeCondition : gettingConditionNameList") data = self.kiwoom.dynamicCtotal_all("GetConditionNameList()") conditionList = data.split(';') del conditionList[-1] conditionDictionary = {} for condition in conditionList: key, value = condition.split('^') conditionDictionary[int(key)] = value return conditionDictionary # ์กฐ๊ฑด๊ฒ€์ƒ‰ ์„ธ๋ถ€ ์ข…๋ชฉ ์กฐํšŒ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext): logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % (sScrNo, strCodeList, strConditionName, nIndex, nNext)) # print("ํ™”๋ฉด_TradeCondition : OnReceiveTrCondition") """ (1ํšŒ์„ฑ, ์‹ค์‹œ๊ฐ„) ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ :param screenNo: string :param codes: string - ์ข…๋ชฉ์ฝ”๋“œ ๋ชฉ๋ก(๊ฐ ์ข…๋ชฉ์€ ์„ธ๋ฏธ์ฝœ๋ก ์œผ๋กœ ๊ตฌ๋ถ„๋จ) :param conditionName: string - ์กฐ๊ฑด์‹ ์ด๋ฆ„ :param conditionIndex: int - ์กฐ๊ฑด์‹ ์ธ๋ฑ์Šค :param inquiry: int - ์กฐํšŒ๊ตฌ๋ถ„(0: ๋‚จ์€๋ฐ์ดํ„ฐ ์—†์Œ, 2: ๋‚จ์€๋ฐ์ดํ„ฐ ์žˆ์Œ) """ try: if strCodeList == "": return self.codeList = strCodeList.split(';') del self.codeList[-1] # print("์ข…๋ชฉ๊ฐœ์ˆ˜: ", length(self.codeList)) # print(self.codeList) for code in self.codeList: row = [] # code.adding(c) row.adding(code) n = self.kiwoom.dynamicCtotal_all("GetMasterCodeName(QString)", code) # now = abs(int(self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", code, 10))) # name.adding(n) row.adding(n) # row.adding(now) self.result.adding(row) # self.kf_con['์ข…๋ชฉ์ฝ”๋“œ'] = code # self.kf_con['์ข…๋ชฉ๋ช…'] = name # print(self.kf_con) self.data = KnowledgeFrame(data=self.result, columns=self.columns) self.data['์ข…๋ชฉ์ฝ”๋“œ'] = "'" + self.data['์ข…๋ชฉ์ฝ”๋“œ'] # self.data.to_csv('์กฐ๊ฑด์‹_'+ self.condition_name + '_์ข…๋ชฉ.csv', encoding='euc-kr', index=False) # print(self.temp) # ์ข…๋ชฉ์— ๋Œ€ํ•œ ์ฃผ๊ฐ€ ํฌ๋กค๋ง ํ›„ ์ตœ์ข… ์ข…๋ชฉ ์„ ์ • # self.data = self.pick_stock(self.data) self.model.umkate(self.data) # self.model.umkate(self.kf_con) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) except Exception as e: print("OnReceiveTrCondition Error : ", e) fintotal_ally: self.conditionLoop.exit() # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ์— ๋Œ€ํ•œ ์‘๋‹ต ์ด๋ฒคํŠธ def OnReceiveConditionVer(self, lRet, sMsg): logger.debug('main:OnReceiveConditionVer : [์ด๋ฒคํŠธ] ์กฐ๊ฑด์‹ ์ €์žฅ [%s] [%s]' % (lRet, sMsg)) # print("ํ™”๋ฉด_TradeCondition : OnReceiveConditionVer") """ gettingConditionLoad() ๋ฉ”์„œ๋“œ์˜ ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ์— ๋Œ€ํ•œ ์‘๋‹ต ์ด๋ฒคํŠธ :param receive: int - ์‘๋‹ต๊ฒฐ๊ณผ(1: ์„ฑ๊ณต, ๋‚˜๋จธ์ง€ ์‹คํŒจ) :param msg: string - ๋ฉ”์„ธ์ง€ """ try: self.condition = self.gettingConditionNameList() # condition์ด ๋ฆฌํ„ด๋˜์„œ ์˜ค๋ฉด GetCondition์—์„œ condition ๋ณ€์ˆ˜ ์‚ฌ์šฉ ๊ฐ€๋Šฅ # print("์กฐ๊ฑด์‹ ๊ฐœ์ˆ˜: ", length(self.condition)) # for key in self.condition.keys(): # print("์กฐ๊ฑด์‹: ", key, ": ", self.condition[key]) except Exception as e: print("OnReceiveConditionVer_Error") fintotal_ally: self.conditionLoop.exit() # print(self.conditionName) # self.kiwoom.dynamicCtotal_all("SendCondition(QString,QString, int, int)", '0156', '๊ฐญ์ƒ์Šน', 0, 0) # ์‹ค์‹œ๊ฐ„ ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex): logger.debug('main:OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex)) # print("ํ™”๋ฉด_TradeCondition : 
OnReceiveRealCondition") """ ์‹ค์‹œ๊ฐ„ ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ :param code: string - ์ข…๋ชฉ์ฝ”๋“œ :param event: string - ์ด๋ฒคํŠธ์ข…๋ฅ˜("I": ์ข…๋ชฉํŽธ์ž…, "D": ์ข…๋ชฉ์ดํƒˆ) :param conditionName: string - ์กฐ๊ฑด์‹ ์ด๋ฆ„ :param conditionIndex: string - ์กฐ๊ฑด์‹ ์ธ๋ฑ์Šค(์—ฌ๊ธฐ์„œ๋งŒ ์ธ๋ฑ์Šค๊ฐ€ string ํƒ€์ž…์œผ๋กœ ์ „๋‹ฌ๋จ) """ print("[receiveRealCondition]") print("์ข…๋ชฉ์ฝ”๋“œ: ", sTrCode) print("์ด๋ฒคํŠธ: ", "์ข…๋ชฉํŽธ์ž…" if strType == "I" else "์ข…๋ชฉ์ดํƒˆ") # ์กฐ๊ฑด์‹ ์ข…๋ชฉ ๊ฒ€์ƒ‰ ๋ฒ„ํŠผ ํด๋ฆญ ์‹œ ์‹คํ–‰๋จ(์‹œ๊ทธ๋„/์Šฌ๋กฏ ์ถ”๊ฐ€) def inquiry(self): # print("ํ™”๋ฉด_TradeCondition : inquiry") try: self.result = [] index = int(self.kf_condition['Index'][self.comboBox_condition.currentIndex()]) # currentIndex() : ํ˜„์žฌ ์ฝค๋ณด๋ฐ•์Šค์—์„œ ์„ ํƒ๋œ index๋ฅผ ๋ฐ›์Œ intํ˜• self.condition_name = self.condition[index] print(index, self.condition[index]) self.sendCondition("0156", self.condition[index], index, 0) # 1 : ์‹ค์‹œ๊ฐ„ ์กฐ๊ฑด๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ์กฐํšŒ, 0 : ์ผํšŒ์„ฑ ์กฐํšŒ except Exception as e: print("์กฐ๊ฑด ๊ฒ€์ƒ‰ Error: ", e) class CTradeCondition(CTrade): # ๋กœ๋ด‡ ์ถ”๊ฐ€ ์‹œ __init__ : ๋ณต์‚ฌ, Setting / ์ดˆ๊ธฐ์กฐ๊ฑด:์ „๋žต์— ๋งž๊ฒŒ, ๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ / Run:๋ณต์‚ฌ def __init__(self, sName, UUID, kiwoom=None, parent=None): # print("CTradeCondition : __init__") self.sName = sName self.UUID = UUID self.sAccount = None self.kiwoom = kiwoom self.parent = parent self.running = False self.remained_data = True self.์ดˆ๊ธฐ์„ค์ •์ƒํƒœ = False self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.portfolio = dict() self.CList = [] self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.Smtotal_allScreenNumber = 9999 self.d = today # ์กฐ๊ฑด์‹ ์„ ํƒ์— ์˜ํ•ด์„œ ํˆฌ์ž๊ธˆ, ๋งค์ˆ˜/๋„ ๋ฐฉ๋ฒ•, ํฌํŠธํด๋ฆฌ์˜ค ์ˆ˜, ๊ฒ€์ƒ‰ ์ข…๋ชฉ ๋“ฑ์ด ์ €์žฅ๋จ def Setting(self, sScreenNo, ํฌํŠธํด๋ฆฌ์˜ค์ˆ˜, ์กฐ๊ฑด์‹์ธ๋ฑ์Šค, ์กฐ๊ฑด์‹๋ช…, ์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…, ๋‹จ์œ„ํˆฌ์ž๊ธˆ, ๋งค์ˆ˜๋ฐฉ๋ฒ•, ๋งค๋„๋ฐฉ๋ฒ•): # print("CTradeCondition : Setting") self.sScreenNo = sScreenNo self.ํฌํŠธํด๋ฆฌ์˜ค์ˆ˜ = ํฌํŠธํด๋ฆฌ์˜ค์ˆ˜ self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค = ์กฐ๊ฑด์‹์ธ๋ฑ์Šค self.์กฐ๊ฑด์‹๋ช… = ์กฐ๊ฑด์‹๋ช… self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž… = int(์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…) self.๋‹จ์œ„ํˆฌ์ž๊ธˆ = ๋‹จ์œ„ํˆฌ์ž๊ธˆ self.๋งค์ˆ˜๋ฐฉ๋ฒ• = ๋งค์ˆ˜๋ฐฉ๋ฒ• self.๋งค๋„๋ฐฉ๋ฒ• = ๋งค๋„๋ฐฉ๋ฒ• self.๋ณด์œ ์ผ = 1 self.์ต์ ˆ = 5 # percent self.๊ณ ๊ฐ€๋Œ€๋น„ = -1 # percent self.์†์ ˆ = -2.7 # percent self.ํˆฌ์ž๊ธˆ๋น„์ค‘ = 70 # ์˜ˆ์ˆ˜๊ธˆ ๋Œ€๋น„ percent print("์กฐ๊ฒ€๊ฒ€์ƒ‰ ๋กœ๋ด‡ ์…‹ํŒ… ์™„๋ฃŒ - ์กฐ๊ฑด์ธ๋ฑ์Šค : %s, ์กฐ๊ฑด์‹๋ช… : %s, ๊ฒ€์ƒ‰ํƒ€์ž… : %s"%(self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…)) logger.info("์กฐ๊ฒ€๊ฒ€์ƒ‰ ๋กœ๋ด‡ ์…‹ํŒ… ์™„๋ฃŒ - ์กฐ๊ฑด์ธ๋ฑ์Šค : %s, ์กฐ๊ฑด์‹๋ช… : %s, ๊ฒ€์ƒ‰ํƒ€์ž… : %s" % (self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…)) # Robot_Run์ด ๋˜๋ฉด ์‹คํ–‰๋จ - ๋งค๋„ ์ข…๋ชฉ์„ ๋ฆฌ์ŠคํŠธ๋กœ ์ €์žฅ def ์ดˆ๊ธฐ์กฐ๊ฑด(self, codes): # print("CTradeCondition : ์ดˆ๊ธฐ์กฐ๊ฑด") self.parent.statusbar.showMessage("[%s] ์ดˆ๊ธฐ์กฐ๊ฑด์ค€๋น„" % (self.sName)) self.sell_band = [0, 3, 5, 10, 15, 25] self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด = [-2.7, 0.5, -2.0, -2.0, -2.0, -2.0] self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง = True self.clearcheck = False # ๋‹น์ผ์ฒญ์‚ฐ ์ฒดํฌ๋ณ€์ˆ˜ self.์กฐ๊ฑด๊ฒ€์ƒ‰์ด๋ฒคํŠธ = False # ๋งค์ˆ˜ํ•  ์ข…๋ชฉ์€ ํ•ด๋‹น ์กฐ๊ฑด์—์„œ ๊ฒ€์ƒ‰๋œ ์ข…๋ชฉ # ๋งค๋„ํ•  ์ข…๋ชฉ์€ ์ด๋ฏธ ๋งค์ˆ˜๊ฐ€ ๋˜์–ด ํฌํŠธํด๋ฆฌ์˜ค์— ์ €์žฅ๋˜์–ด ์žˆ๋Š” ์ข…๋ชฉ self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ = [] self.๋งค๋„ํ• ์ข…๋ชฉ = [] self.๋งค์ˆ˜ํ• ์ข…๋ชฉ = codes # for code in codes: 
# ์„ ํƒํ•œ ์ข…๋ชฉ๊ฒ€์ƒ‰์‹์˜ ์ข…๋ชฉ์€ '๋งค์ˆ˜ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ # stock = self.portfolio.getting(code) # ์ดˆ๊ธฐ ๋กœ๋ด‡ ์‹คํ–‰ ์‹œ ํฌํŠธํด๋ฆฌ์˜ค๋Š” ๋น„์–ด์žˆ์Œ # if stock != None: # ๊ฒ€์ƒ‰ํ•œ ์ข…๋ชฉ์ด ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ์œผ๋ฉด '๋งค๋„ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ # self.๋งค๋„ํ• ์ข…๋ชฉ.adding(code) # else: # ํฌํŠธํด๋ฆฌ์˜ค์— ์—†์œผ๋ฉด ๋งค์ˆ˜์ข…๋ชฉ๋ฆฌ์ŠคํŠธ์— ์ €์žฅ # self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.adding(code) for port_code in list(self.portfolio.keys()): # ํฌํŠธํด๋ฆฌ์˜ค์— ์žˆ๋Š” ์ข…๋ชฉ์€ '๋งค๋„ํ• ์ข…๋ชฉ'์— ์ถ”๊ฐ€ ๋ณด์œ ๊ธฐ๊ฐ„ = holdingcal(self.portfolio[port_code].๋งค์ˆ˜์ผ) - 1 if ๋ณด์œ ๊ธฐ๊ฐ„ < 3: self.portfolio[port_code].๋งค๋„์ „๋žต = 5 # ๋งค๋„์ง€์—ฐ ์ข…๋ชฉ์€ ๋ชฉํ‘œ๊ฐ€ ๋‚ฎ์ถค 5% -> 3% -> 1% elif ๋ณด์œ ๊ธฐ๊ฐ„ >= 3 and ๋ณด์œ ๊ธฐ๊ฐ„ < 5: self.portfolio[port_code].๋งค๋„์ „๋žต = 3 elif ๋ณด์œ ๊ธฐ๊ฐ„ >= 3 and ๋ณด์œ ๊ธฐ๊ฐ„ < 5: self.portfolio[port_code].๋งค๋„์ „๋žต = 1 print(self.portfolio[port_code].__dict__) logger.info(self.portfolio[port_code].__dict__) self.๋งค๋„ํ• ์ข…๋ชฉ.adding(port_code) # ์ˆ˜๋™ ํฌํŠธํด๋ฆฌ์˜ค ์ƒ์„ฑ def manual_portfolio(self): self.portfolio = dict() self.Stocklist = { '032190': {'์ข…๋ชฉ๋ช…': '๋‹ค์šฐ๋ฐ์ดํƒ€', '์ข…๋ชฉ์ฝ”๋“œ': '032190', '๋งค์ˆ˜๊ฐ€': [16150], '์ˆ˜๋Ÿ‰': 12, '๋ณด์œ ์ผ':1, '๋งค์ˆ˜์ผ': '2020/08/05 09:08:54'}, '047400': {'์ข…๋ชฉ๋ช…': '์œ ๋‹ˆ์˜จ๋จธํ‹ฐ๋ฆฌ์–ผ', '์ข…๋ชฉ์ฝ”๋“œ': '047400', '๋งค์ˆ˜๊ฐ€': [5350], '์ˆ˜๋Ÿ‰': 36, '๋ณด์œ ์ผ':1, '๋งค์ˆ˜์ผ': '2020/08/05 09:42:55'}, '085660': {'์ข…๋ชฉ๋ช…': '์ฐจ๋ฐ”์ด์˜คํ…', '์ข…๋ชฉ์ฝ”๋“œ': '085660', '๋งค์ˆ˜๊ฐ€': [22100], '์ˆ˜๋Ÿ‰': 9, '๋ณด์œ ์ผ': 1, '๋งค์ˆ˜์ผ': '2020/08/05 09:08:54'}, '000020': {'์ข…๋ชฉ๋ช…': '๋™ํ™”์•ฝํ’ˆ', '์ข…๋ชฉ์ฝ”๋“œ': '000020', '๋งค์ˆ˜๊ฐ€': [25800 ], '์ˆ˜๋Ÿ‰': 7, '๋ณด์œ ์ผ': 1, '๋งค์ˆ˜์ผ': '2020/08/05 09:42:55'}, } for code in list(self.Stocklist.keys()): self.portfolio[code] = CPortStock(์ข…๋ชฉ์ฝ”๋“œ=code, ์ข…๋ชฉ๋ช…=self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ๋งค์ˆ˜๊ฐ€=self.Stocklist[code]['๋งค์ˆ˜๊ฐ€'][0], ๋ณด์œ ์ผ=self.Stocklist[code]['๋ณด์œ ์ผ'], ์ˆ˜๋Ÿ‰=self.Stocklist[code]['์ˆ˜๋Ÿ‰'], ๋งค์ˆ˜์ผ=self.Stocklist[code]['๋งค์ˆ˜์ผ']) # google spreadsheet ๋งค๋งค์ด๋ ฅ ์ƒ์„ฑ def save_history(self, code, status): # ๋งค๋งค์ด๋ ฅ sheet์— ํ•ด๋‹น ์ข…๋ชฉ(๋งค์ˆ˜๋œ ์ข…๋ชฉ)์ด ์žˆ์œผ๋ฉด row๋ฅผ ๋ฐ˜ํ™˜ ์•„๋‹ˆ๋ฉด ์˜ˆ์™ธ์ฒ˜๋ฆฌ -> ์‹ ๊ทœ ๋งค์ˆ˜๋กœ ์ฒ˜๋ฆฌ try: code_row = condition_history_sheet.findtotal_all(self.portfolio[code].์ข…๋ชฉ๋ช…)[ -1].row # ์ข…๋ชฉ๋ช…์ด ์žˆ๋Š” ๋ชจ๋“  ์…€์„ ์ฐพ์•„์„œ ๋งจ ์•„๋ž˜์— ์žˆ๋Š” ์…€์„ ์„ ํƒ cell = alpha_list[condition_history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) # ๋งค์ˆ˜ ์ด๋ ฅ์— ์žˆ๋Š” ์ข…๋ชฉ์ด ๋งค๋„๊ฐ€ ๋˜์—ˆ๋Š”์ง€ ํ™•์ธ sell_price = condition_history_sheet.acell(str(cell)).value # ๋งค๋„ ์ด๋ ฅ์€ ์ถ”๊ฐ€ ๋งค๋„(๋งค๋„์ „๋žต5์˜ ๊ฒฝ์šฐ)๋‚˜ ์‹ ๊ทœ ๋งค๋„์ธ ๊ฒฝ์šฐ๋ผ ๋งค๋„ ์ด๋ ฅ ์œ ๋ฌด์™€ ์ƒ๊ด€์—†์Œ if status == '๋งค๋„': # ํฌํŠธํด๋ฆฌ์˜ค ๋ฐ์ดํ„ฐ ์‚ฌ์šฉ cell = alpha_list[condition_history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) condition_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค๋„๊ฐ€) cell = alpha_list[condition_history_cols.index('๋งค๋„์ผ')] + str(code_row) condition_history_sheet.umkate_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')) ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ  = value_round((self.portfolio[code].๋งค๋„๊ฐ€ / self.portfolio[code].๋งค์ˆ˜๊ฐ€ - 1) * 100, 2) cell = alpha_list[condition_history_cols.index('์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)')] + str(code_row) # ์ˆ˜์ต๋ฅ  ๊ณ„์‚ฐ condition_history_sheet.umkate_acell(cell, ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ ) # ๋งค์ˆ˜ ์ด๋ ฅ์€ ์žˆ์œผ๋‚˜ ๋งค๋„ ์ด๋ ฅ์ด ์—†์Œ -> ๋งค๋„ ์ „ ์ถ”๊ฐ€ ๋งค์ˆ˜ if sell_price == '': if status 
== '๋งค์ˆ˜': # ํฌํŠธํด๋ฆฌ์˜ค ๋ฐ์ดํ„ฐ ์‚ฌ์šฉ cell = alpha_list[condition_history_cols.index('๋งค์ˆ˜๊ฐ€')] + str(code_row) condition_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค์ˆ˜๊ฐ€) cell = alpha_list[condition_history_cols.index('๋งค์ˆ˜์ผ')] + str(code_row) condition_history_sheet.umkate_acell(cell, self.portfolio[code].๋งค์ˆ˜์ผ) else: # ๋งค๋„๊ฐ€๊ฐ€ ๊ธฐ๋ก๋˜์–ด ๊ฑฐ๋ž˜๊ฐ€ ์™„๋ฃŒ๋œ ์ข…๋ชฉ์œผ๋กœ ํŒ๋‹จํ•˜์—ฌ ์˜ˆ์™ธ๋ฐœ์ƒ์œผ๋กœ ์‹ ๊ทœ ๋งค์ˆ˜ ์ถ”๊ฐ€ํ•จ raise Exception('๋งค๋งค์™„๋ฃŒ ์ข…๋ชฉ') except: row = [] try: if status == '๋งค์ˆ˜': row.adding(self.portfolio[code].์ข…๋ชฉ๋ช…) row.adding(self.portfolio[code].๋งค์ˆ˜๊ฐ€) row.adding(self.portfolio[code].๋งค์ˆ˜์ผ) condition_history_sheet.adding_row(row) except Exception as e: print('[%s]save_history Error :'%(self.sName,e)) Telegram('[StockTrader][%s]save_history Error :'%(self.sName,e), send='mc') logger.error('[%s]save_history Error :'%(self.sName,e)) # ๋งค์ˆ˜ ์ „๋žต๋ณ„ ๋งค์ˆ˜ ์กฐ๊ฑด ํ™•์ธ def buy_strategy(self, code, price): result = False ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€ = price # ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] if self.๋‹จ์œ„ํˆฌ์ž๊ธˆ // ํ˜„์žฌ๊ฐ€ > 0 and ํ˜„์žฌ๊ฐ€ >= ๊ณ ๊ฐ€ * (0.99) and ์ €๊ฐ€ > ์ „์ผ์ข…๊ฐ€ and ํ˜„์žฌ๊ฐ€ < ์‹œ๊ฐ€ * 1.1 and ์‹œ๊ฐ€ <= ์ „์ผ์ข…๊ฐ€ * 1.05: result = True return result # ๋งค๋„ ๊ตฌ๊ฐ„ ํ™•์ธ def profit_band_check(self, ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€): # print('ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€', ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€) ratio = value_round((ํ˜„์žฌ๊ฐ€ - ๋งค์ˆ˜๊ฐ€) / ๋งค์ˆ˜๊ฐ€ * 100, 2) # print('ratio', ratio) if ratio < 3: return 1 elif ratio in self.sell_band: return self.sell_band.index(ratio) + 1 else: self.sell_band.adding(ratio) self.sell_band.sort() band = self.sell_band.index(ratio) self.sell_band.remove(ratio) return band # ๋งค๋„ ์ „๋žต def sell_strategy(self, code, price): result = False band = self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ # ์ด์ „ ๋งค๋„ ๊ตฌ๊ฐ„ ๋ฐ›์Œ ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€ = price # ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] ๋งค์ˆ˜๊ฐ€ = self.portfolio[code].๋งค์ˆ˜๊ฐ€ sell_price = ํ˜„์žฌ๊ฐ€ # ๋งค๋„๋ฅผ ์œ„ํ•œ ์ˆ˜์ต๋ฅ  ๊ตฌ๊ฐ„ ์ฒดํฌ(๋งค์ˆ˜๊ฐ€ ๋Œ€๋น„ ํ˜„์žฌ๊ฐ€์˜ ์ˆ˜์ต๋ฅ  ์กฐ๊ฑด์— ๋‹ค๋ฅธ ๊ตฌ๊ฐ„ ์„ค์ •) new_band = self.profit_band_check(ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ฐ€) if (hogacal(์‹œ๊ฐ€, 0, self.portfolio[code].์‹œ์žฅ, '์ƒํ•œ๊ฐ€')) <= ํ˜„์žฌ๊ฐ€: band = 7 if band < new_band: # ์ด์ „ ๊ตฌ๊ฐ„๋ณด๋‹ค ํ˜„์žฌ ๊ตฌ๊ฐ„์ด ๋†’์„ ๊ฒฝ์šฐ(์‹œ์„ธ๊ฐ€ ์˜ฌ๋ผ๊ฐ„ ๊ฒฝ์šฐ)๋งŒ band = new_band # ๊ตฌ๊ฐ„์„ ํ˜„์žฌ ๊ตฌ๊ฐ„์œผ๋กœ ๋ณ€๊ฒฝ(๋ฐ˜๋Œ€์˜ ๊ฒฝ์šฐ๋Š” ๊ตฌ๊ฐ„ ์œ ์ง€) # self.sell_band = [0, 3, 5, 10, 15, 25] # self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด = [-2.7, 0.3, -3.0, -4.0, -5.0, -7.0] if band == 1 and ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[0] / 100)): result = False elif band == 2 and ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[1] / 100)): # 3% ์ดํ•˜์ผ ๊ฒฝ์šฐ 0.3%๊นŒ์ง€ ๋–จ์–ด์ง€๋ฉด ๋งค๋„ result = True elif band == 3 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[2] / 100)): # 5% ์ด์ƒ์ผ ๊ฒฝ์šฐ ๊ณ ๊ฐ€๋Œ€๋น„ -3%๊นŒ์ง€ ๋–จ์–ด์ง€๋ฉด ๋งค๋„ result = True elif band == 4 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[3] / 100)): result = True elif band == 5 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[4] / 100)): result = True elif band == 6 and ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.๋งค๋„๊ตฌ๊ฐ„๋ณ„์กฐ๊ฑด[5] / 100)): result = True elif band == 7 and ํ˜„์žฌ๊ฐ€ >= (hogacal(์‹œ๊ฐ€, -3, self.portfolio[code].์‹œ์žฅ, '์ƒํ•œ๊ฐ€')): result = True self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = band # 
ํฌํŠธํด๋ฆฌ์˜ค์— ๋งค๋„๊ตฌ๊ฐ„ ์—…๋ฐ์ดํŠธ if current_time >= '15:10:00': # 15์‹œ 10๋ถ„์— ๋งค๋„ ์ฒ˜๋ฆฌ result = True """ if self.portfolio[code].๋งค๋„์ „๋žต๋ณ€๊ฒฝ1 == False and current_time >= '11:00:00' and current_time < '13:00:00': self.portfolio[code].๋งค๋„์ „๋žต๋ณ€๊ฒฝ1 = True self.portfolio[code].๋งค๋„์ „๋žต = self.portfolio[code].๋งค๋„์ „๋žต * 0.6 elif self.portfolio[code].๋งค๋„์ „๋žต๋ณ€๊ฒฝ2 == False and current_time >= '13:00:00': self.portfolio[code].๋งค๋„์ „๋žต๋ณ€๊ฒฝ2 = True self.portfolio[code].๋งค๋„์ „๋žต = self.portfolio[code].๋งค๋„์ „๋žต * 0.6 if self.portfolio[code].๋งค๋„์ „๋žต < 0.3: self.portfolio[code].๋งค๋„์ „๋žต = 0.3 # 2. ์ต์ ˆ ๋งค๋„ ์ „๋žต if ํ˜„์žฌ๊ฐ€ >= ๋งค์ˆ˜๊ฐ€ * (1 + (self.portfolio[code].๋งค๋„์ „๋žต / 100)): result = True sell_price = ํ˜„์žฌ๊ฐ€ # 3. ๊ณ ๊ฐ€๋Œ€๋น„ ๋น„์œจ ๋งค๋„ ์ „๋žต # elif ํ˜„์žฌ๊ฐ€ <= ๊ณ ๊ฐ€ * (1 + (self.๊ณ ๊ฐ€๋Œ€๋น„ / 100)): # result = True # sell_price = ํ˜„์žฌ๊ฐ€ # 4. ์†์ ˆ ๋งค๋„ ์ „๋žต # elif ํ˜„์žฌ๊ฐ€ <= ๋งค์ˆ˜๊ฐ€ * (1 + (self.์†์ ˆ / 100)): # result = True # sell_price = ํ˜„์žฌ๊ฐ€ """ return result, sell_price # ๋‹น์ผ์ฒญ์‚ฐ ์ „๋žต def clearning_strategy(self): if self.clearcheck == True: print('๋‹น์ผ์ฒญ์‚ฐ ๋งค๋„') try: for code in list(self.portfolio.keys()): if self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('S_%s' % code) is None and self.portfolio[code].์ˆ˜๋Ÿ‰ != 0: self.portfolio[code].๋งค๋„๊ตฌ๊ฐ„ = 0 self.๋งค๋„๋ฐฉ๋ฒ• = '03' # 03:์‹œ์žฅ๊ฐ€ (result, order) = self.์ •๋Ÿ‰๋งค๋„(sRQName='S_%s' % code, ์ข…๋ชฉ์ฝ”๋“œ=code, ๋งค๋„๊ฐ€=self.portfolio[code].๋งค์ˆ˜๊ฐ€, ์ˆ˜๋Ÿ‰=self.portfolio[code].์ˆ˜๋Ÿ‰) if result == True: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['S_%s' % code] = True Telegram('[StockTrader]์ •๋Ÿ‰๋งค๋„(๋‹น์ผ์ฒญ์‚ฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % (code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰), send='mc') logger.info('์ •๋Ÿ‰๋งค๋„(๋‹น์ผ์ฒญ์‚ฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % (code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) else: Telegram('[StockTrader]์ •์•ก๋งค๋„์‹คํŒจ(๋‹น์ผ์ฒญ์‚ฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % (code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰), send='mc') logger.info('์ •๋Ÿ‰๋งค๋„์‹คํŒจ(๋‹น์ผ์ฒญ์‚ฐ) : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ์ˆ˜๋Ÿ‰=%s' % (code, self.portfolio[code].์ข…๋ชฉ๋ช…, self.portfolio[code].์ˆ˜๋Ÿ‰)) except Exception as e: print("clearning_strategy Error :", e) # ์ฃผ๋ฌธ์ฒ˜๋ฆฌ def ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ(self, param): if self.running == True: ์ฒด๊ฒฐ์‹œ๊ฐ„ = '%s %s:%s:%s' % (str(self.d), param['์ฒด๊ฒฐ์‹œ๊ฐ„'][0:2], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][2:4], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][4:]) ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ํ˜„์žฌ๊ฐ€ = abs(int(float(param['ํ˜„์žฌ๊ฐ€']))) ์ „์ผ๋Œ€๋น„ = int(float(param['์ „์ผ๋Œ€๋น„'])) ๋“ฑ๋ฝ๋ฅ  = float(param['๋“ฑ๋ฝ๋ฅ ']) ๋งค๋„ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค๋„ํ˜ธ๊ฐ€']))) ๋งค์ˆ˜ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค์ˆ˜ํ˜ธ๊ฐ€']))) ๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰ = abs(int(float(param['๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰']))) ์‹œ๊ฐ€ = abs(int(float(param['์‹œ๊ฐ€']))) ๊ณ ๊ฐ€ = abs(int(float(param['๊ณ ๊ฐ€']))) ์ €๊ฐ€ = abs(int(float(param['์ €๊ฐ€']))) ๊ฑฐ๋ž˜ํšŒ์ „์œจ = abs(float(param['๊ฑฐ๋ž˜ํšŒ์ „์œจ'])) ์‹œ๊ฐ€์ด์•ก = abs(int(float(param['์‹œ๊ฐ€์ด์•ก']))) ์ „์ผ์ข…๊ฐ€ = ํ˜„์žฌ๊ฐ€ - ์ „์ผ๋Œ€๋น„ # MainWindow์˜ __init__์—์„œ CODE_POOL ๋ณ€์ˆ˜ ์„ ์–ธ(self.CODE_POOL = self.getting_code_pool()), pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์‹œ๊ฐ€์ด์•ก] ์ข…๋ชฉ๋ช… = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][1] # pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก] ์‹œ์žฅ๊ตฌ๋ถ„ = 
self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][0] ์ „์ผ์ข…๊ฐ€ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][3] ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ์ „์ผ์ข…๊ฐ€] self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„)) # ์ •์•ก๋งค๋„ ํ›„ ํฌํŠธํด๋ฆฌ์˜ค/๋งค๋„ํ• ์ข…๋ชฉ์—์„œ ์ œ๊ฑฐ if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค๋„ํ• ์ข…๋ชฉ: if self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) is not None and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('S_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None: # ๋งค๋„ ์ „๋žต๋ณ„ ๋ชจ๋‹ˆํ„ฐ๋ง ์ฒดํฌ sell_check, ๋งค๋„๊ฐ€ = self.sell_strategy(์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์„ธ) if sell_check == True: (result, order) = self.์ •์•ก๋งค๋„(sRQName='S_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค๋„๊ฐ€=๋งค๋„๊ฐ€, ์ˆ˜๋Ÿ‰=self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰) if result == True: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['S_%s' % ์ข…๋ชฉ์ฝ”๋“œ] = True if ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) Telegram('[StockTrader]%s ๋งค๋„์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ๋งค๋„๊ฐ€=%s, ์ˆ˜๋Ÿ‰=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰), send='mc') logger.info('[StockTrader]%s ๋งค๋„์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ตฌ๊ฐ„=%s, ๋งค๋„๊ฐ€=%s, ์ˆ˜๋Ÿ‰=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ตฌ๊ฐ„, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰)) else: Telegram('[StockTrader]%s ๋งค๋„์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ์ˆ˜๋Ÿ‰=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰), send='mc') logger.info('[StockTrader]%s ๋งค๋„์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค๋„๊ฐ€=%s, ์ˆ˜๋Ÿ‰=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].์ˆ˜๋Ÿ‰)) # ๋งค์ˆ˜ํ•  ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ์ •์•ก๋งค์ˆ˜ ์ฃผ๋ฌธํ•˜๊ณ  ํฌํŠธํด๋ฆฌ์˜ค/๋งค๋„ํ• ์ข…๋ชฉ์— ์ถ”๊ฐ€, ๋งค์ˆ˜ํ• ์ข…๋ชฉ์—์„œ ์ œ์™ธ if current_time <= '14:30:00': if ์ข…๋ชฉ์ฝ”๋“œ in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ and ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: if length(self.portfolio) < self.์ตœ๋Œ€ํฌํŠธ์ˆ˜ and self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) is None and self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.getting('B_%s' % ์ข…๋ชฉ์ฝ”๋“œ) is None: buy_check = self.buy_strategy(์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์„ธ) if buy_check == True: (result, order) = self.์ •์•ก๋งค์ˆ˜(sRQName='B_%s' % ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ๋งค์ˆ˜๊ฐ€=ํ˜„์žฌ๊ฐ€, ๋งค์ˆ˜๊ธˆ์•ก=self.๋‹จ์œ„ํˆฌ์ž๊ธˆ) if result == True: self.portfolio[์ข…๋ชฉ์ฝ”๋“œ] = CPortStock(์ข…๋ชฉ์ฝ”๋“œ=์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…=์ข…๋ชฉ๋ช…, ์‹œ์žฅ=์‹œ์žฅ๊ตฌ๋ถ„, ๋งค์ˆ˜๊ฐ€=ํ˜„์žฌ๊ฐ€, ๋ณด์œ ์ผ=self.๋ณด์œ ์ผ, ๋งค๋„์ „๋žต = self.์ต์ ˆ, ๋งค์ˆ˜์ผ=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')) self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock['B_%s' % ์ข…๋ชฉ์ฝ”๋“œ] = True Telegram('[StockTrader]%s ๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€), send='mc') logger.info('[StockTrader]%s ๋งค์ˆ˜์ฃผ๋ฌธ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€)) else: Telegram('[StockTrader]%s ๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€), send='mc') logger.info('[StockTrader]%s ๋งค์ˆ˜์‹คํŒจ : ์ข…๋ชฉ์ฝ”๋“œ=%s, ์ข…๋ชฉ๋ช…=%s, ๋งค์ˆ˜๊ฐ€=%s' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€)) else: if self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง == True: self.parent.ConditionTick.stop() self.๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง = False logger.info("๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œ๊ฐ„ 
์ดˆ๊ณผ") def ์ ‘์ˆ˜์ฒ˜๋ฆฌ(self, param): pass # OnReceiveChejanData์—์„œ ์ฒด๊ฒฐ์ฒ˜๋ฆฌ๊ฐ€ ๋˜๋ฉด ์ฒด๊ฒฐ์ฒ˜๋ฆฌ ํ˜ธ์ถœ def ์ฒด๊ฒฐ์ฒ˜๋ฆฌ(self, param): ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ์ฃผ๋ฌธ๋ฒˆํ˜ธ = param['์ฃผ๋ฌธ๋ฒˆํ˜ธ'] self.์ฃผ๋ฌธ๊ฒฐ๊ณผ[์ฃผ๋ฌธ๋ฒˆํ˜ธ] = param ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ = int(param['์ฃผ๋ฌธ์ˆ˜๋Ÿ‰']) ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ = int(param['๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰']) ์ฒด๊ฒฐ๊ฐ€ = int(0 if (param['์ฒด๊ฒฐ๊ฐ€'] is None or param['์ฒด๊ฒฐ๊ฐ€'] == '') else param['์ฒด๊ฒฐ๊ฐ€']) # ๋งค์ž…๊ฐ€ ๋™์ผ ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ = int(0 if (param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] is None or param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰'] == '') else param['๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰']) ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ = int(0 if (param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] is None or param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ'] == '') else param['๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ']) ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ = int(0 if (param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] is None or param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] == '') else param['๋‹น์ผ๋งค๋งค์„ธ๊ธˆ']) # ๋งค์ˆ˜ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '2': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค์ˆ˜๊ฐ€ = int(์ฃผ๋ฌธ[2:]) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] P.๋งค์ˆ˜๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ # ๋‹จ์œ„์ฒด๊ฒฐ๊ฐ€ P.์ˆ˜๋Ÿ‰ += ๋‹จ์œ„์ฒด๊ฒฐ๋Ÿ‰ # ์ถ”๊ฐ€ ๋งค์ˆ˜ ๋Œ€๋น„ํ•ด์„œ ๊ธฐ์กด ์ˆ˜๋Ÿ‰์— ์ฒด๊ฒฐ๋œ ์ˆ˜๋Ÿ‰ ๊ณ„์† ๋”ํ•จ(์ฃผ๋ฌธ์ˆ˜๋Ÿ‰ - ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰) P.๋งค์ˆ˜์ผ = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') else: logger.error('ERROR ํฌํŠธ์— ์ข…๋ชฉ์ด ์—†์Œ !!!!') if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: try: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) self.save_history(์ข…๋ชฉ์ฝ”๋“œ, status='๋งค์ˆ˜') Telegram('[StockTrader]%s ๋งค์ˆ˜์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ๋งค์ˆ˜๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (self.sName, P.์ข…๋ชฉ๋ช…, P.๋งค์ˆ˜๊ฐ€, P.์ˆ˜๋Ÿ‰), send='mc') logger.info('[StockTrader]%s %s ๋งค์ˆ˜ ์™„๋ฃŒ : ๋งค์ˆ˜/์ฃผ๋ฌธ%s Pop, ๋งค๋„ Append ' % (self.sName, ์ข…๋ชฉ์ฝ”๋“œ, ์ฃผ๋ฌธ)) except Exception as e: Telegram('[StockTrader]%s ์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ POP์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s ' % (self.sName, P.์ข…๋ชฉ๋ช…), send='mc') logger.error('[StockTrader]%s ์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค์ˆ˜ POP์—๋Ÿฌ ์ข…๋ชฉ๋ช…:%s ' % (self.sName, P.์ข…๋ชฉ๋ช…)) # ๋งค๋„ if param['๋งค๋„์ˆ˜๊ตฌ๋ถ„'] == '1': if self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘.getting(์ฃผ๋ฌธ๋ฒˆํ˜ธ) is not None: ์ฃผ๋ฌธ = self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘[์ฃผ๋ฌธ๋ฒˆํ˜ธ] ๋งค๋„๊ฐ€ = int(์ฃผ๋ฌธ[2:]) try: if ๋ฏธ์ฒด๊ฒฐ์ˆ˜๋Ÿ‰ == 0: self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock.pop(์ฃผ๋ฌธ) P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.์ข…๋ชฉ๋ช… = param['์ข…๋ชฉ๋ช…'] self.portfolio[์ข…๋ชฉ์ฝ”๋“œ].๋งค๋„๊ฐ€ = ์ฒด๊ฒฐ๊ฐ€ self.save_history(์ข…๋ชฉ์ฝ”๋“œ, status='๋งค๋„') Telegram('[StockTrader]%s ๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (self.sName, param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰), send='mc') logger.info('[StockTrader]%s ๋งค๋„์ฒด๊ฒฐ์™„๋ฃŒ_์ข…๋ชฉ๋ช…:%s, ์ฒด๊ฒฐ๊ฐ€:%s, ์ˆ˜๋Ÿ‰:%s' % (self.sName, param['์ข…๋ชฉ๋ช…'], ์ฒด๊ฒฐ๊ฐ€, ์ฃผ๋ฌธ์ˆ˜๋Ÿ‰)) except Exception as e: Telegram('[StockTrader]%s ์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ ๋งค๋งค์ด๋ ฅ Error : %s' % (self.sName, e), send='mc') logger.error('[StockTrader]%s ์ฒด๊ฒฐ์ฒ˜๋ฆฌ_๋งค๋„ ๋งค๋งค์ด๋ ฅ Error : %s' % (self.sName, e)) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() def ์ž”๊ณ ์ฒ˜๋ฆฌ(self, param): ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] P = self.portfolio.getting(์ข…๋ชฉ์ฝ”๋“œ) if P is not None: P.๋งค์ˆ˜๊ฐ€ = int(0 if (param['๋งค์ž…๋‹จ๊ฐ€'] is None or param['๋งค์ž…๋‹จ๊ฐ€'] == '') else param['๋งค์ž…๋‹จ๊ฐ€']) P.์ˆ˜๋Ÿ‰ = int(0 if (param['๋ณด์œ ์ˆ˜๋Ÿ‰'] is None or param['๋ณด์œ ์ˆ˜๋Ÿ‰'] == '') else 
param['๋ณด์œ ์ˆ˜๋Ÿ‰']) if P.์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(์ข…๋ชฉ์ฝ”๋“œ) self.๋งค๋„ํ• ์ข…๋ชฉ.remove(์ข…๋ชฉ์ฝ”๋“œ) if ์ข…๋ชฉ์ฝ”๋“œ not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ.adding(์ข…๋ชฉ์ฝ”๋“œ) logger.info('์ž”๊ณ ์ฒ˜๋ฆฌ_ํฌํŠธํด๋ฆฌ์˜คPOP %s ' % ์ข…๋ชฉ์ฝ”๋“œ) # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() # MainWindow์˜ ConditionTick์— ์˜ํ•ด์„œ 3๋ถ„๋งˆ๋‹ค ์‹คํ–‰ def ConditionCheck(self): if '3' in self.sName: if current_time >= "15:00:00" and self.์กฐ๊ฑด๊ฒ€์ƒ‰์ด๋ฒคํŠธ == False: self.์กฐ๊ฑด๊ฒ€์ƒ‰์ด๋ฒคํŠธ = True codes = self.GetCodes(self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…) print(current_time, codes) code_list=[] for code in codes: code_list.adding(code + '_' + self.parent.CODE_POOL[code][1] + '\n') code_list = "".join(code_list) print(current_time, code_list) Telegram(code_list, send='mc') else: pass else: codes = self.GetCodes(self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…) print(current_time, codes) for code in codes: if code not in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ and self.portfolio.getting(code) is None and code not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: print('๋งค์ˆ˜์ข…๋ชฉ์ถ”๊ฐ€ : ', code, self.parent.CODE_POOL[code][1]) self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.adding(code) self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.adding(code) ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') # ์‹ค์‹œ๊ฐ„ ์‹œ์„ธ์กฐํšŒ ์ข…๋ชฉ ์ถ”๊ฐ€ logger.debug("[%s]์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s %s" % (self.sName, self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ, ret)) # ์‹ค์‹œ๊ฐ„ ์กฐ๊ฒ€ ๊ฒ€์ƒ‰ ํŽธ์ž… ์ข…๋ชฉ ์ฒ˜๋ฆฌ def ์‹ค์‹œ๊ฐ„์กฐ๊ฑด์ฒ˜๋ฆฌ(self, code): if (code not in self.๋งค์ˆ˜ํ• ์ข…๋ชฉ) and (self.portfolio.getting(code) is None) and (code not in self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ): print('๋งค์ˆ˜์ข…๋ชฉ์ถ”๊ฐ€ : ', code) self.๋งค์ˆ˜ํ• ์ข…๋ชฉ.adding(code) self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.adding(code) ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') # ์‹ค์‹œ๊ฐ„ ์‹œ์„ธ์กฐํšŒ ์ข…๋ชฉ ์ถ”๊ฐ€ logger.debug("[%s]์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s %s" % (self.sName, self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ, ret)) def Run(self, flag=True, sAccount=None): self.running = flag ret = 0 codes = [] self.codeList = [] # self.manual_portfolio() if flag == True: print("%s ROBOT ์‹คํ–‰" % (self.sName)) self.KiwoomConnect() try: logger.info("[%s]์กฐ๊ฑด์‹ ๊ฑฐ๋ž˜ ๋กœ๋ด‡ ์‹คํ–‰"%(self.sName)) self.sAccount = Account self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.ํˆฌ์ž์ด์•ก = floor(int(d2deposit.replacing(",", "")) * (self.ํˆฌ์ž๊ธˆ๋น„์ค‘ / 100)) print('D+2 ์˜ˆ์ˆ˜๊ธˆ : ', int(d2deposit.replacing(",", ""))) print('ํˆฌ์ž๊ธˆ : ', self.ํˆฌ์ž์ด์•ก) print('๋‹จ์œ„ํˆฌ์ž๊ธˆ : ', self.๋‹จ์œ„ํˆฌ์ž๊ธˆ) self.์ตœ๋Œ€ํฌํŠธ์ˆ˜ = self.ํฌํŠธํด๋ฆฌ์˜ค์ˆ˜ # floor(self.ํˆฌ์ž์ด์•ก / self.๋‹จ์œ„ํˆฌ์ž๊ธˆ) + length(self.portfolio) # print('๊ธฐ์กดํฌํŠธ์ˆ˜ : ', length(self.portfolio)) print('์ตœ๋Œ€ํฌํŠธ์ˆ˜ : ', self.์ตœ๋Œ€ํฌํŠธ์ˆ˜) print("์กฐ๊ฑด์‹ ์ธ๋ฑ์Šค : ", self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, type(self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค)) print("์กฐ๊ฑด์‹๋ช… : ", self.์กฐ๊ฑด์‹๋ช…) if self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž… == 0: # 3๋ถ„๋ด‰ ๊ฒ€์ƒ‰ self.parent.ConditionTick.start(1000) else: # ์‹ค์‹œ๊ฐ„ ๊ฒ€์ƒ‰ print('์‹ค์‹œ๊ฐ„ ์กฐ๊ฑด๊ฒ€์ƒ‰') codes = self.GetCodes(self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค, self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž…) codes = [] self.์ดˆ๊ธฐ์กฐ๊ฑด(codes) print("๋งค์ˆ˜ : ", self.๋งค์ˆ˜ํ• ์ข…๋ชฉ) print("๋งค๋„ : ", self.๋งค๋„ํ• ์ข…๋ชฉ) self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = self.๋งค๋„ํ• ์ข…๋ชฉ + self.๋งค์ˆ˜ํ• ์ข…๋ชฉ logger.info("[%s]์˜ค๋Š˜ ๊ฑฐ๋ž˜ ์ข…๋ชฉ : 
%s" % (self.sName, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';')) if length(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') # ์‹ค์‹œ๊ฐ„ ์‹œ์„ธ์กฐํšŒ ๋“ฑ๋ก logger.debug("์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s" % ret) except Exception as e: print('[%s]_Run Error : %s' % (self.sName,e)) Telegram('[StockTrader][%s]_Run Error : %s' % (self.sName,e), send='mc') logger.error('[StockTrader][%s]_Run Error : %s' % (self.sName,e)) else: if self.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž… == 0: self.parent.ConditionTick.stop() # MainWindow ํƒ€์ด๋จธ ์ค‘์ง€ else: ret = self.sendConditionStop("0156", self.์กฐ๊ฑด์‹๋ช…, self.์กฐ๊ฑด์‹์ธ๋ฑ์Šค) # ์‹ค์‹œ๊ฐ„ ์กฐ๊ฒ€ ๊ฒ€์ƒ‰ ์ค‘์ง€ ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL') if self.portfolio is not None: for code in list(self.portfolio.keys()): if self.portfolio[code].์ˆ˜๋Ÿ‰ == 0: self.portfolio.pop(code) if length(self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) > 0: try: Telegram("[StockTrader]%s ๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload : %s" % (self.sName, self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ), send='mc') logger.info("[%s]๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload : %s" % (self.sName, self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ)) self.parent.statusbar.showMessage("๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ ์†์ต Upload") self.DailyProfit(self.๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) except Exception as e: print('%s ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ ๊ฒฐ๊ณผ ์—…๋กœ๋“œ Error : %s' %(self.sName, e)) fintotal_ally: del self.DailyProfitLoop # ๊ธˆ์ผ๋งค๋„๊ฒฐ๊ณผ ์—…๋ฐ์ดํŠธ ์‹œ QEventLoop ์‚ฌ์šฉ์œผ๋กœ ๋กœ๋ด‡ ์ €์žฅ ์‹œ pickcle ์—๋Ÿฌ ๋ฐœ์ƒํ•˜์—ฌ ์‚ญ์ œ์‹œํ‚ด del self.ConditionLoop self.KiwoomDisConnect() # ๋กœ๋ด‡ ํด๋ž˜์Šค ๋‚ด์—์„œ ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ๋ฐ์ดํ„ฐ๋ฅผ ๋ฐ›๊ณ ๋‚˜์„œ ์—ฐ๊ฒฐ ํ•ด์ œ์‹œํ‚ด # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() class ํ™”๋ฉด_ConditionMonitoring(QDialog, Ui_TradeCondition): def __init__(self, sScreenNo, kiwoom=None, parent=None): # super(ํ™”๋ฉด_ConditionMonitoring, self).__init__(parent) # self.setAttribute(Qt.WA_DeleteOnClose) # ์œ„์ ฏ์ด ๋‹ซํž๋•Œ ๋‚ด์šฉ ์‚ญ์ œํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์ฐฝ์ด ๋‹ซํž๋•Œ ์ •๋ณด๋ฅผ ์ €์žฅํ•ด์•ผ๋˜๋Š” ๋กœ๋ด‡ ์„ธํŒ… ์‹œ์—๋Š” ์“ฐ๋ฉด ์—๋Ÿฌ๋‚จ!! 
self.setupUi(self) self.setWindowTitle("ConditionMonitoring") self.lineEdit_name.setText('ConditionMonitoring') self.progressBar.setValue(0) # Progressbar ์ดˆ๊ธฐ ์…‹ํŒ… self.sScreenNo = sScreenNo self.kiwoom = kiwoom # self.parent = parent self.model = MonkeyModel() self.tableView.setModel(self.model) self.columns = ['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', '์กฐ๊ฑด์‹'] self.result = [] self.KiwoomConnect() self.GetCondition() # ์ €์žฅ๋œ ์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ๋ชฉ๋ก ์ฝ์Œ def GetCondition(self): try: self.gettingConditionLoad() self.kf_condition = KnowledgeFrame() self.idx = [] self.conName = [] for index in self.condition.keys(): # condition์€ dictionary # print(self.condition) self.idx.adding(str(index)) self.conName.adding(self.condition[index]) # self.sendCondition("0156", self.condition[index], index, 1) self.kf_condition['Index'] = self.idx self.kf_condition['Name'] = self.conName self.kf_condition['Table'] = ">> ์กฐ๊ฑด์‹ " + self.kf_condition['Index'] + " : " + self.kf_condition['Name'] self.kf_condition['Index'] = self.kf_condition['Index'].totype(int) self.kf_condition = self.kf_condition.sort_the_values(by='Index').reseting_index(sip=True) # ์ถ”๊ฐ€ print(self.kf_condition) # ์ถ”๊ฐ€ self.comboBox_condition.clear() self.comboBox_condition.addItems(self.kf_condition['Table'].values) except Exception as e: print("GetCondition_Error") print(e) # ์กฐ๊ฑด๊ฒ€์ƒ‰ ํ•ด๋‹น ์ข…๋ชฉ ์š”์ฒญ ๋ฉ”์„œ๋“œ def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime): isRequest = self.kiwoom.dynamicCtotal_all("SendCondition(QString, QString, int, int", screenNo, conditionName, conditionIndex, isRealTime) # OnReceiveTrCondition() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ์กฐ๊ฑด ๊ฒ€์ƒ‰ ๊ด€๋ จ ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ์™€ ๋ถ™์ž„(์ฝœ๋ฐฑ) def KiwoomConnect(self): self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) # ์กฐ๊ฑด ๊ฒ€์ƒ‰ ๊ด€๋ จ ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ ์—ฐ๊ฒฐ ํ•ด์ œ def KiwoomDisConnect(self): self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition) # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ ๋ฉ”์„œ๋“œ def gettingConditionLoad(self): self.kiwoom.dynamicCtotal_all("GetConditionLoad()") # OnReceiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ํš๋“ ๋ฉ”์„œ๋“œ(์กฐ๊ฑด์‹ ๋ชฉ๋ก์„ ๋”•์…”๋„ˆ๋ฆฌ๋กœ ๋ฆฌํ„ด) def gettingConditionNameList(self): data = self.kiwoom.dynamicCtotal_all("GetConditionNameList()") conditionList = data.split(';') del conditionList[-1] conditionDictionary = {} for condition in conditionList: key, value = condition.split('^') conditionDictionary[int(key)] = value return conditionDictionary # ์กฐ๊ฑด๊ฒ€์ƒ‰ ์„ธ๋ถ€ ์ข…๋ชฉ ์กฐํšŒ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext): logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % ( sScrNo, strCodeList, strConditionName, nIndex, nNext)) try: if strCodeList == "": return self.codeList = strCodeList.split(';') del self.codeList[-1] # print("์ข…๋ชฉ๊ฐœ์ˆ˜: ", length(self.codeList)) # print(self.codeList) for code 
in self.codeList: row = [] # code.adding(c) row.adding(code) n = self.kiwoom.dynamicCtotal_all("GetMasterCodeName(QString)", code) # now = abs(int(self.kiwoom.dynamicCtotal_all("GetCommRealData(QString, int)", code, 10))) # name.adding(n) row.adding(n) row.adding(strConditionName) self.result.adding(row) # self.kf_con['์ข…๋ชฉ์ฝ”๋“œ'] = code # self.kf_con['์ข…๋ชฉ๋ช…'] = name # print(self.kf_con) self.data = KnowledgeFrame(data=self.result, columns=self.columns) self.data['์ข…๋ชฉ์ฝ”๋“œ'] = "'" + self.data['์ข…๋ชฉ์ฝ”๋“œ'] self.data = self.data.sort_the_values(by=['์กฐ๊ฑด์‹', '์ข…๋ชฉ๋ช…']) self.data = self.data.sip_duplicates(['์ข…๋ชฉ๋ช…', '์กฐ๊ฑด์‹'], keep='first').reseting_index(sip=True) print(self.data) self.model.umkate(self.data) # self.model.umkate(self.kf_con) for i in range(length(self.columns)): self.tableView.resizeColumnToContents(i) fintotal_ally: time.sleep(2) self.conditionLoop.exit() # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ์— ๋Œ€ํ•œ ์‘๋‹ต ์ด๋ฒคํŠธ def OnReceiveConditionVer(self, lRet, sMsg): logger.debug('main:OnReceiveConditionVer : [์ด๋ฒคํŠธ] ์กฐ๊ฑด์‹ ์ €์žฅ [%s] [%s]' % (lRet, sMsg)) try: self.condition = self.gettingConditionNameList() # condition์ด ๋ฆฌํ„ด๋˜์„œ ์˜ค๋ฉด GetCondition์—์„œ condition ๋ณ€์ˆ˜ ์‚ฌ์šฉ ๊ฐ€๋Šฅ # print("์กฐ๊ฑด์‹ ๊ฐœ์ˆ˜: ", length(self.condition)) # for key in self.condition.keys(): # print("์กฐ๊ฑด์‹: ", key, ": ", self.condition[key]) except Exception as e: print("OnReceiveConditionVer_Error") fintotal_ally: self.conditionLoop.exit() # print(self.conditionName) # self.kiwoom.dynamicCtotal_all("SendCondition(QString,QString, int, int)", '0156', '๊ฐญ์ƒ์Šน', 0, 0) # ์‹ค์‹œ๊ฐ„ ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ์‹œ ๋ฐœ์ƒ๋˜๋Š” ์ด๋ฒคํŠธ def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex): logger.debug( 'main:OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex)) print("์ข…๋ชฉ์ฝ”๋“œ: ", sTrCode) print("์ด๋ฒคํŠธ: ", "์ข…๋ชฉํŽธ์ž…" if strType == "I" else "์ข…๋ชฉ์ดํƒˆ") # ์กฐ๊ฑด์‹ ์ข…๋ชฉ ๊ฒ€์ƒ‰ ๋ฒ„ํŠผ ํด๋ฆญ ์‹œ ์‹คํ–‰๋จ(์‹œ๊ทธ๋„/์Šฌ๋กฏ ์ถ”๊ฐ€) def inquiry(self): self.result = [] cnt=0 print('์กฐ๊ฑด์‹ ๊ฐฏ์ˆ˜ :', length(self.kf_condition)) for idx in range(length(self.kf_condition)): print(idx, self.condition[idx]) self.sendCondition("0156", self.condition[idx], idx, 0) cnt += 1 # Progress Bar ๋””์Šคํ”Œ๋ ˆ์ด(์ „์ฒด ์‹œ๊ฐ„ ๋Œ€๋น„ ๋น„์œจ) self.progressBar.setValue(cnt / length(self.kf_condition) * 100) print('์กฐ๊ฑด์‹ ์ข…๋ชฉ ์กฐํšŒ ์™„๋ฃŒ') self.parent.statusbar.showMessage("์กฐ๊ฑด์‹ ์ข…๋ชฉ ์กฐํšŒ ์™„๋ฃŒ") # ์›ํ•˜๋Š” ์ข…๋ชฉ/์ฃผ๊ฐ€ ์„ค์ • ํ›„ ์•Œ๋ฆผ class CPriceMonitoring(CTrade): # ๋กœ๋ด‡ ์ถ”๊ฐ€ ์‹œ __init__ : ๋ณต์‚ฌ, Setting, ์ดˆ๊ธฐ์กฐ๊ฑด:์ „๋žต์— ๋งž๊ฒŒ, ๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ~Run:๋ณต์‚ฌ def __init__(self, sName, UUID, kiwoom=None, parent=None): self.sName = sName self.UUID = UUID self.sAccount = None self.kiwoom = kiwoom self.parent = parent self.running = False self.์ฃผ๋ฌธ๊ฒฐ๊ณผ = dict() self.์ฃผ๋ฌธ๋ฒˆํ˜ธ_์ฃผ๋ฌธ_๋งคํ•‘ = dict() self.์ฃผ๋ฌธ์‹คํ–‰์ค‘_Lock = dict() self.portfolio = dict() self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = [] self.Smtotal_allScreenNumber = 9999 self.d = today # RobotAdd ํ•จ์ˆ˜์—์„œ ์ดˆ๊ธฐํ™” ๋‹ค์Œ ์…‹ํŒ… ์‹คํ–‰ํ•ด์„œ ์„ค์ •๊ฐ’ ๋„˜๊น€ def Setting(self, sScreenNo): self.sScreenNo = sScreenNo # ์ˆ˜๋™ ํฌํŠธํด๋ฆฌ์˜ค ์ƒ์„ฑ def manual_portfolio(self): self.portfolio = dict() self.Stocklist = { '005935': {'์ข…๋ชฉ๋ช…': '์‚ผ์„ฑ์ „์ž์šฐ', '์ข…๋ชฉ์ฝ”๋“œ': '005935', '์‹œ์žฅ': 'KOSPI', '๋งค์ˆ˜๊ฐ€': 50600, '์ˆ˜๋Ÿ‰': 10, '๋งค์ˆ˜์ผ': '2020/09/24 09:00:00'}, '092130': {'์ข…๋ชฉ๋ช…': 
'์ดํฌ๋ ˆ๋”๋ธ”', '์ข…๋ชฉ์ฝ”๋“œ': '092130', '์‹œ์žฅ': 'KOSDAQ', '๋งค์ˆ˜๊ฐ€': 24019, '์ˆ˜๋Ÿ‰': 21, '๋งค์ˆ˜์ผ': '2020/11/04 09:00:00'}, '271560': {'์ข…๋ชฉ๋ช…': '์˜ค๋ฆฌ์˜จ', '์ข…๋ชฉ์ฝ”๋“œ': '271560', '์‹œ์žฅ': 'KOSPI', '๋งค์ˆ˜๊ฐ€': 132000, '์ˆ˜๋Ÿ‰': 10, '๋งค์ˆ˜์ผ': '2020/10/08 09:00:00'}, } for code in list(self.Stocklist.keys()): self.portfolio[code] = CPortStock_LongTerm(์ข…๋ชฉ์ฝ”๋“œ=code, ์ข…๋ชฉ๋ช…=self.Stocklist[code]['์ข…๋ชฉ๋ช…'], ์‹œ์žฅ=self.Stocklist[code]['์‹œ์žฅ'], ๋งค์ˆ˜๊ฐ€=self.Stocklist[code]['๋งค์ˆ˜๊ฐ€'], ์ˆ˜๋Ÿ‰=self.Stocklist[code]['์ˆ˜๋Ÿ‰'], ๋งค์ˆ˜์ผ=self.Stocklist[code]['๋งค์ˆ˜์ผ']) # Robot_Run์ด ๋˜๋ฉด ์‹คํ–‰๋จ - ๋งค์ˆ˜/๋งค๋„ ์ข…๋ชฉ์„ ๋ฆฌ์ŠคํŠธ๋กœ ์ €์žฅ def ์ดˆ๊ธฐ์กฐ๊ฑด(self): self.parent.statusbar.showMessage("[%s] ์ดˆ๊ธฐ์กฐ๊ฑด์ค€๋น„" % (self.sName)) row_data = price_monitoring_sheet.getting_total_all_values() self.stocklist = {} self.Data_save = False for row in row_data[1:]: temp = [] try: code, name, market = getting_code(row[0]) # ์ข…๋ชฉ๋ช…์œผ๋กœ ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์‹œ์žฅ ๋ฐ›์•„์„œ(getting_code ํ•จ์ˆ˜) ์ถ”๊ฐ€ except Exception as e: name = '' code = '' market = '' print('๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์ข…๋ชฉ๋ช… ์˜ค๋ฅ˜ : %s' % (row[1])) logger.error('๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s' % (row[1])) Telegram('[StockTrader]๊ตฌ๊ธ€ ๋งค์ˆ˜๋ชจ๋‹ˆํ„ฐ๋ง ์‹œํŠธ ์˜ค๋ฅ˜ : %s' % (row[1])) for idx in range(1, length(row)): if row[idx] != '': temp.adding(int(row[idx])) self.stocklist[code] = { '์ข…๋ชฉ๋ช…': name, '์ข…๋ชฉ์ฝ”๋“œ': code, '๋ชจ๋‹ˆํ„ฐ๋ง์ฃผ๊ฐ€': temp } print(self.stocklist) self.๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ชฉ = list(self.stocklist.keys()) try: self.kf_codes = mk.KnowledgeFrame() cnt = 0 for code in self.๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ชฉ: temp = fdr.DataReader(code) temp = temp[-70:][['Open', 'High', 'Low', 'Close', 'Volume']] temp.reseting_index(inplace=True) temp['Date'] = temp['Date'].totype(str) temp['Code'] = code if cnt == 0: self.kf_codes = temp.clone() else: self.kf_codes = mk.concating([self.kf_codes, temp]) self.kf_codes.reseting_index(sip=True, inplace=True) cnt += 1 except Exception as e: print('CPriceMonitoring_์ดˆ๊ธฐ์กฐ๊ฑด ์˜ค๋ฅ˜ : %s' % (e)) logger.error('CPriceMonitoring_์ดˆ๊ธฐ์กฐ๊ฑด ์˜ค๋ฅ˜ : %s' % (e)) Telegram('[StockTrader]CPriceMonitoring_์ดˆ๊ธฐ์กฐ๊ฑด ์˜ค๋ฅ˜ : %s' % (e)) # ์ด๋™ํ‰๊ท ๊ฐ€ ์œ„์น˜ ํ™•์ธ def MA_Check(self, data): if data['MA5'] < data['MA20']: return True else: return False # ์ด๋™ํ‰๊ท ์„ ์ด์šฉํ•œ ๋งค์ˆ˜ ์ „๋žต ์‹ ํ˜ธ ๋ฐœ์ƒ def MA_Strategy(self, name, code, price): today = datetime.datetime.today().strftime("%Y-%m-%d") ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ๊ฑฐ๋ž˜๋Ÿ‰ = price try: kf = self.kf_codes.loc[self.kf_codes['Code'] == code] kf.reseting_index(sip=True, inplace=True) kf.loc[length(kf)] = [today, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ํ˜„์žฌ๊ฐ€, ๊ฑฐ๋ž˜๋Ÿ‰, code] #['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Code] kf['MA5'] = kf['Close'].rolling(window=5).average() kf['MA20'] = kf['Close'].rolling(window=20).average() kf['MA_Check'] = kf.employ(self.MA_Check, axis=1) if self.Data_save==False and current_time >= '15:19:00': self.Data_save = True self.kf_codes.to_csv('PriceData.csv', encoding='euc-kr', index=False) if kf.iloc[-2]['MA_Check'] == True and kf.iloc[-1]['MA_Check'] == False: Telegram('[StockTrader]%s ๋งค์ˆ˜ ์‹ ํ˜ธ ๋ฐœ์ƒ\nํ˜„์žฌ๊ฐ€ : %s, ์‹œ๊ฐ€ : %s, ๊ณ ๊ฐ€ : %s, ์ €๊ฐ€ : %s' % (name, ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€)) logger.info('[StockTrader]%s ๋งค์ˆ˜ ์‹ ํ˜ธ ๋ฐœ์ƒ\nํ˜„์žฌ๊ฐ€ : %s, ์‹œ๊ฐ€ : %s, ๊ณ ๊ฐ€ : %s, ์ €๊ฐ€ : %s' % (name, ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€)) except Exception as e: print('CPriceMonitoring_MA_Strategy ์˜ค๋ฅ˜ : 
%s' % (e)) logger.error('CPriceMonitoring_MA_Strategy ์˜ค๋ฅ˜ : %s' % (e)) Telegram('[StockTrader]CPriceMonitoring_MA_Strategy ์˜ค๋ฅ˜ : %s' % (e)) def ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ(self, param): try: if self.running == True: ์ฒด๊ฒฐ์‹œ๊ฐ„ = '%s %s:%s:%s' % (str(self.d), param['์ฒด๊ฒฐ์‹œ๊ฐ„'][0:2], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][2:4], param['์ฒด๊ฒฐ์‹œ๊ฐ„'][4:]) ์ข…๋ชฉ์ฝ”๋“œ = param['์ข…๋ชฉ์ฝ”๋“œ'] ํ˜„์žฌ๊ฐ€ = abs(int(float(param['ํ˜„์žฌ๊ฐ€']))) ์ „์ผ๋Œ€๋น„ = int(float(param['์ „์ผ๋Œ€๋น„'])) ๋“ฑ๋ฝ๋ฅ  = float(param['๋“ฑ๋ฝ๋ฅ ']) ๋งค๋„ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค๋„ํ˜ธ๊ฐ€']))) ๋งค์ˆ˜ํ˜ธ๊ฐ€ = abs(int(float(param['๋งค์ˆ˜ํ˜ธ๊ฐ€']))) ๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰ = abs(int(float(param['๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰']))) ์‹œ๊ฐ€ = abs(int(float(param['์‹œ๊ฐ€']))) ๊ณ ๊ฐ€ = abs(int(float(param['๊ณ ๊ฐ€']))) ์ €๊ฐ€ = abs(int(float(param['์ €๊ฐ€']))) ๊ฑฐ๋ž˜ํšŒ์ „์œจ = abs(float(param['๊ฑฐ๋ž˜ํšŒ์ „์œจ'])) ์‹œ๊ฐ€์ด์•ก = abs(int(float(param['์‹œ๊ฐ€์ด์•ก']))) ์ข…๋ชฉ๋ช… = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][1] # pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก] ์‹œ์žฅ๊ตฌ๋ถ„ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][0] ์ „์ผ์ข…๊ฐ€ = self.parent.CODE_POOL[์ข…๋ชฉ์ฝ”๋“œ][3] ์‹œ์„ธ = [ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€, ๋ˆ„์ ๊ฑฐ๋ž˜๋Ÿ‰] self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„)) # print("[%s] %s %s %s %s" % (์ฒด๊ฒฐ์‹œ๊ฐ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์ „์ผ๋Œ€๋น„)) if length(self.stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋ชจ๋‹ˆํ„ฐ๋ง์ฃผ๊ฐ€']) > 0: if ํ˜„์žฌ๊ฐ€ in self.stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋ชจ๋‹ˆํ„ฐ๋ง์ฃผ๊ฐ€']: Telegram('[StockTrader]%s ์ฃผ๊ฐ€๋„๋‹ฌ ์•Œ๋ฆผ\nํ˜„์žฌ๊ฐ€ : %s, ์‹œ๊ฐ€ : %s, ๊ณ ๊ฐ€ : %s, ์ €๊ฐ€ : %s' % (์ข…๋ชฉ๋ช…, ํ˜„์žฌ๊ฐ€, ์‹œ๊ฐ€, ๊ณ ๊ฐ€, ์ €๊ฐ€)) self.stocklist[์ข…๋ชฉ์ฝ”๋“œ]['๋ชจ๋‹ˆํ„ฐ๋ง์ฃผ๊ฐ€'].remove(ํ˜„์žฌ๊ฐ€) self.MA_Strategy(์ข…๋ชฉ๋ช…, ์ข…๋ชฉ์ฝ”๋“œ, ์‹œ์„ธ) except Exception as e: print('CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e)) Telegram('[StockTrader]CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error : %s, %s' % (์ข…๋ชฉ๋ช…, e), send='mc') logger.error('CTradeLongTerm_์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ Error :%s, %s' % (์ข…๋ชฉ๋ช…, e)) def ์ ‘์ˆ˜์ฒ˜๋ฆฌ(self, param): pass def ์ฒด๊ฒฐ์ฒ˜๋ฆฌ(self, param): pass def ์ž”๊ณ ์ฒ˜๋ฆฌ(self, param): pass def Run(self, flag=True, sAccount=None): self.running = flag ret = 0 # self.manual_portfolio() if flag == True: print("%s ROBOT ์‹คํ–‰" % (self.sName)) try: Telegram("[StockTrader]%s ROBOT ์‹คํ–‰" % (self.sName)) self.์ดˆ๊ธฐ์กฐ๊ฑด() print('์ดˆ๊ธฐ์กฐ๊ฑด ์„ค์ • ์™„๋ฃŒ') self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = self.๋ชจ๋‹ˆํ„ฐ๋ง์ข…๋ชฉ logger.info("์˜ค๋Š˜ ๊ฑฐ๋ž˜ ์ข…๋ชฉ : %s %s" % (self.sName, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';')) self.KiwoomConnect() # MainWindow ์™ธ์—์„œ ํ‚ค์›€ API๊ตฌ๋™์‹œ์ผœ์„œ ์ž์ฒด์ ์œผ๋กœ API๋ฐ์ดํ„ฐ์†ก์ˆ˜์‹ ๊ฐ€๋Šฅํ•˜๋„๋ก ํ•จ if length(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.์‹ค์‹œ๊ฐ„์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) + ';') logger.debug("[%s]์‹ค์‹œ๊ฐ„๋ฐ์ดํƒ€์š”์ฒญ ๋“ฑ๋ก๊ฒฐ๊ณผ %s" % (self.sName, ret)) except Exception as e: print('CPriceMonitoring_Run Error :', e) Telegram('[StockTrader]CPriceMonitoring_Run Error : %s' % e, send='mc') logger.error('CPriceMonitoring_Run Error : %s' % e) else: Telegram("[StockTrader]%s ROBOT ์‹คํ–‰ ์ค‘์ง€" % (self.sName)) ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL') self.KiwoomDisConnect() # ๋กœ๋ด‡ ํด๋ž˜์Šค ๋‚ด์—์„œ ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ๋ฐ์ดํ„ฐ๋ฅผ ๋ฐ›๊ณ ๋‚˜์„œ ์—ฐ๊ฒฐ ํ•ด์ œ์‹œํ‚ด # ๋ฉ”์ธ ํ™”๋ฉด์— ๋ฐ˜์˜ self.parent.RobotView() 
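# ----------------------------------------------------------------------------------
# Illustrative sketch (not part of the robot classes above): CPriceMonitoring.MA_Strategy
# raises its buy alert when MA_Check flips from True on the previous bar to False on the
# current bar, i.e. the 5-bar average crosses up through the 20-bar average. The helper
# below reproduces that check in plain Python over a list of closing prices; the function
# name, default windows and variable names are assumptions for illustration only.
def ma_cross_up_signal(closes, short_window=5, long_window=20):
    """Return True when the short moving average crosses above the long one on the last bar."""
    if len(closes) < long_window + 1:
        return False  # not enough history to compare two consecutive bars

    def sma(values, window, offset=0):
        # simple moving average of the `window` values ending `offset` bars before the newest one
        end = len(values) - offset
        return sum(values[end - window:end]) / float(window)

    prev_short_below_long = sma(closes, short_window, offset=1) < sma(closes, long_window, offset=1)
    curr_short_below_long = sma(closes, short_window, offset=0) < sma(closes, long_window, offset=0)
    # MA_Check True on the previous bar and False on the current bar -> signal
    return prev_short_below_long and not curr_short_below_long
# ----------------------------------------------------------------------------------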
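# ----------------------------------------------------------------------------------
# Illustrative sketch (not part of the robot classes above): ์‹ค์‹œ๊ฐ„๋ฐ์ดํ„ฐ์ฒ˜๋ฆฌ converts the
# real-time fields with abs(int(float(...))) because the OpenAPI hands prices over as
# signed strings such as '-70500' or '+70500', where the sign marks the direction against
# the previous close rather than a negative price. The assumed helper below captures the
# same conversion, including the empty-field case handled throughout the handlers above.
def parse_price_field(raw):
    """Convert a Kiwoom real-time price/volume string to a non-negative int."""
    if raw is None or str(raw).strip() == '':
        return 0                      # missing field -> treat as zero, as the handlers above do
    return abs(int(float(raw)))       # drop the +/- direction marker

# Example: parse_price_field('-70500') == 70500, parse_price_field('') == 0
# ----------------------------------------------------------------------------------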
################################################################################## # ๋ฉ”์ธ ################################################################################## Ui_MainWindow, QtBaseClass_MainWindow = uic.loadUiType("./UI/XTrader_MainWindow.ui") class MainWindow(QMainWindow, Ui_MainWindow): def __init__(self): # ํ™”๋ฉด์„ ๋ณด์—ฌ์ฃผ๊ธฐ ์œ„ํ•œ ์ฝ”๋“œ super().__init__() QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.UI_setting() # ํ˜„์žฌ ์‹œ๊ฐ„ ๋ฐ›์Œ self.์‹œ์ž‘์‹œ๊ฐ = datetime.datetime.now() # ๋ฉ”์ธ์œˆ๋„์šฐ๊ฐ€ ๋œจ๊ณ  ํ‚ค์›€์ฆ๊ถŒ๊ณผ ๋ถ™์ด๊ธฐ ์œ„ํ•œ ์ž‘์—… self.KiwoomAPI() # ํ‚ค์›€ ActiveX๋ฅผ ๋ฉ”๋ชจ๋ฆฌ์— ์˜ฌ๋ฆผ self.KiwoomConnect() # ๋ฉ”๋ชจ๋ฆฌ์— ์˜ฌ๋ผ์˜จ ActiveX์™€ ๋‚ด๊ฐ€ ๋งŒ๋“  ํ•จ์ˆ˜ On์‹œ๋ฆฌ์ฆˆ์™€ ์—ฐ๊ฒฐ(์ฝœ๋ฐฑ : ์ด๋ฒคํŠธ๊ฐ€ ์˜ค๋ฉด ๋‚˜๋ฅผ ๋ถˆ๋Ÿฌ์ค˜) self.ScreenNumber = 5000 self.robots = [] self.dialog = dict() # self.dialog['๋ฆฌ์–ผ๋ฐ์ดํƒ€'] = None # self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'] = None self.model = MonkeyModel() self.tableView_robot.setModel(self.model) self.tableView_robot.setSelectionBehavior(QTableView.SelectRows) self.tableView_robot.setSelectionMode(QTableView.SingleSelection) self.tableView_robot.pressed.connect(self.RobotCurrentIndex) # self.connect(self.tableView_robot.selectionModel(), SIGNAL("currentRowChanged(QModelIndex,QModelIndex)"), self.RobotCurrentIndex) self.tableView_robot_current_index = None self.portfolio_model = MonkeyModel() self.tableView_portfolio.setModel(self.portfolio_model) self.tableView_portfolio.setSelectionBehavior(QTableView.SelectRows) self.tableView_portfolio.setSelectionMode(QTableView.SingleSelection) # self.portfolio_model.umkate((KnowledgeFrame(columns=['์ข…๋ชฉ์ฝ”๋“œ', '์ข…๋ชฉ๋ช…', '๋งค์ˆ˜๊ฐ€', '์ˆ˜๋Ÿ‰', '๋งค์ˆ˜์ผ']))) self.robot_columns = ['Robotํƒ€์ž…', 'Robot๋ช…', 'RobotID', '์Šคํฌ๋ฆฐ๋ฒˆํ˜ธ', '์‹คํ–‰์ƒํƒœ', 'ํฌํŠธ์ˆ˜', 'ํฌํŠธํด๋ฆฌ์˜ค'] # TODO: ์ฃผ๋ฌธ์ œํ•œ ์„ค์ • self.timer = QTimer(self) self.timer.timeout.connect(self.limit_per_second) # ์ดˆ๋‹น 4๋ฒˆ # QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.limit_per_second) self.timer.start(1000) # 1์ดˆ๋งˆ๋‹ค ๋ฆฌ์…‹ self.ConditionTick = QTimer(self) self.ConditionTick.timeout.connect(self.OnConditionCheck) self.์ฃผ๋ฌธ์ œํ•œ = 0 self.์กฐํšŒ์ œํ•œ = 0 self.๊ธˆ์ผ๋ฐฑ์—…์ž‘์—…์ค‘ = False self.์ข…๋ชฉ์„ ์ •์ž‘์—…์ค‘ = False self.ConditionCheck = False self.์กฐ๊ฑด์‹์ €์žฅ์นด์šดํŠธ = 1 self.DailyData = False # ๊ด€์‹ฌ์ข…๋ชฉ ์ผ๋ด‰ ์—…๋ฐ์ดํŠธ self.InvestorData = False # ๊ด€์‹ฌ์ข…๋ชฉ ์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž ์—…๋ฐ์ดํŠธ self.kf_daily = KnowledgeFrame() self.kf_weekly = KnowledgeFrame() self.kf_monthly = KnowledgeFrame() self.kf_investor = KnowledgeFrame() self._login = False self.KiwoomLogin() # ํ”„๋กœ๊ทธ๋žจ ์‹คํ–‰ ์‹œ ์ž๋™๋กœ๊ทธ์ธ self.CODE_POOL = self.getting_code_pool() # DB ์ข…๋ชฉ๋ฐ์ด๋ธ”์—์„œ ์‹œ์žฅ๊ตฌ๋ถ„, ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€ ์ฝ์–ด์˜ด # ํ™”๋ฉด Setting def UI_setting(self): self.setupUi(self) self.setWindowTitle("XTrader") self.setWindowIcon(QIcon('./PNG/icon_stock.png')) self.actionLogin.setIcon(QIcon('./PNG/Internal.png')) self.actionLogout.setIcon(QIcon('./PNG/External.png')) self.actionExit.setIcon(QIcon('./PNG/Approval.png')) self.actionAccountDialog.setIcon(QIcon('./PNG/Sales Performance.png')) self.actionMinutePrice.setIcon(QIcon('./PNG/Candle Sticks.png')) self.actionDailyPrice.setIcon(QIcon('./PNG/Overtime.png')) self.actionInvestors.setIcon(QIcon('./PNG/Conference Ctotal_all.png')) self.actionSectorView.setIcon(QIcon('./PNG/Organization.png')) 
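        # The remaining toolbar/menu actions get their icons below; all of these actions are
        # dispatched by objectName inside MENU_Action further down.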
self.actionSectorPriceView.setIcon(QIcon('./PNG/Ratings.png')) self.actionCodeBuild.setIcon(QIcon('./PNG/Inspection.png')) self.actionRobotOneRun.setIcon(QIcon('./PNG/Process.png')) self.actionRobotOneStop.setIcon(QIcon('./PNG/Cancel 2.png')) self.actionRobotMonitoringStop.setIcon(QIcon('./PNG/Cancel File.png')) self.actionRobotRun.setIcon(QIcon('./PNG/Checked.png')) self.actionRobotStop.setIcon(QIcon('./PNG/Cancel.png')) self.actionRobotRemove.setIcon(QIcon('./PNG/Delete File.png')) self.actionRobotClear.setIcon(QIcon('./PNG/Empty Trash.png')) self.actionRobotView.setIcon(QIcon('./PNG/Checked 2.png')) self.actionRobotSave.setIcon(QIcon('./PNG/Download.png')) self.actionTradeShortTerm.setIcon(QIcon('./PNG/Bullish.png')) self.actionTradeCondition.setIcon(QIcon('./PNG/Search.png')) self.actionConditionMonitoring.setIcon(QIcon('./PNG/Binoculars.png')) # ์ข…๋ชฉ ์„ ์ • def stock_analysis(self): try: self.AnalysisPriceList = self.AnalysisPriceList except: for robot in self.robots: if robot.sName == 'TradeShortTerm': self.AnalysisPriceList = robot.Stocklist['์ „๋žต']['์‹œ์„ธ์กฐํšŒ๋‹จ์œ„'] self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ = mk.KnowledgeFrame(shortterm_analysis_sheet.getting_total_all_records()) # shortterm_analysis_sheet self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ = self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ[['๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…']] row = [] # print(self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ) for name in self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ['์ข…๋ชฉ๋ช…'].values: try: code, name, market = getting_code(name) except Exception as e: code = '' print('getting_code Error :', name, e) row.adding(code) self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ['์ข…๋ชฉ์ฝ”๋“œ'] = row self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ = self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ[self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ['์ข…๋ชฉ์ฝ”๋“œ'] != ''] print(self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ) self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ = list(self.์ข…๋ชฉ์„ ์ •๋ฐ์ดํ„ฐ[['๋ฒˆํ˜ธ', '์ข…๋ชฉ๋ช…', '์ข…๋ชฉ์ฝ”๋“œ']].values) self.์ข…๋ชฉ์ฝ”๋“œ = self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.pop(0) if self.DailyData == True: self.start = datetime.datetime.now() print(self.start) self.ReguestPriceDaily() elif self.InvestorData == True: self.RequestInvestorDaily() elif self.WeeklyData == True: self.ReguestPriceWeekly() elif self.MonthlyData == True: self.ReguestPriceMonthly() # ์ผ๋ด‰๋ฐ์ดํ„ฐ์กฐํฌ def ReguestPriceDaily(self, _repeat=0): try: ๊ธฐ์ค€์ผ์ž = datetime.date.today().strftime('%Y%m%d') self.์ข…๋ชฉ์ผ๋ด‰ = [] ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ[2]) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ธฐ์ค€์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ฃผ์‹์ผ๋ด‰์ฐจํŠธ์กฐํšŒ", "OPT10081", _repeat, '{:04d}'.formating(self.ScreenNumber)) self.statusbar.showMessage("๊ด€์‹ฌ์ข…๋ชฉ ์ผ๋ด‰ ๋ฐ์ดํ„ฐ : %s %s %s" % (self.์ข…๋ชฉ์ฝ”๋“œ[0], self.์ข…๋ชฉ์ฝ”๋“œ[1], self.์ข…๋ชฉ์ฝ”๋“œ[2])) except Exception as e: print(e) # ์ฃผ๋ด‰๋ฐ์ดํ„ฐ์กฐํšŒ def ReguestPriceWeekly(self, _repeat=0): try: ๊ธฐ์ค€์ผ์ž = datetime.date.today().strftime('%Y%m%d') self.์ข…๋ชฉ์ฃผ๋ด‰ = [] ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ[2]) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ธฐ์ค€์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ฃผ์‹์ฃผ๋ด‰์ฐจํŠธ์กฐํšŒ", "OPT10082", _repeat, 
'{:04d}'.formating(self.ScreenNumber)) self.statusbar.showMessage("๊ด€์‹ฌ์ข…๋ชฉ ์ฃผ๋ด‰ ๋ฐ์ดํ„ฐ : %s %s %s" % (self.์ข…๋ชฉ์ฝ”๋“œ[0], self.์ข…๋ชฉ์ฝ”๋“œ[1], self.์ข…๋ชฉ์ฝ”๋“œ[2])) except Exception as e: print(e) # ์›”๋ด‰๋ฐ์ดํ„ฐ์กฐํšŒ def ReguestPriceMonthly(self, _repeat=0): try: ๊ธฐ์ค€์ผ์ž = datetime.date.today().strftime('%Y%m%d') self.์ข…๋ชฉ์›”๋ด‰ = [] ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ[2]) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ธฐ์ค€์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ˆ˜์ •์ฃผ๊ฐ€๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ฃผ์‹์›”๋ด‰์ฐจํŠธ์กฐํšŒ", "OPT10083", _repeat, '{:04d}'.formating(self.ScreenNumber)) self.statusbar.showMessage("๊ด€์‹ฌ์ข…๋ชฉ ์›”๋ด‰ ๋ฐ์ดํ„ฐ : %s %s %s" % (self.์ข…๋ชฉ์ฝ”๋“œ[0], self.์ข…๋ชฉ์ฝ”๋“œ[1], self.์ข…๋ชฉ์ฝ”๋“œ[2])) except Exception as e: print(e) # ์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์กฐํฌ def RequestInvestorDaily(self, _repeat=0): ๊ธฐ์ค€์ผ์ž = datetime.date.today().strftime('%Y%m%d') self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž = [] try: ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ผ์ž", ๊ธฐ์ค€์ผ์ž) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", self.์ข…๋ชฉ์ฝ”๋“œ[2]) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๊ธˆ์•ก์ˆ˜๋Ÿ‰๊ตฌ๋ถ„", 2) # 1:๊ธˆ์•ก, 2:์ˆ˜๋Ÿ‰ ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๋งค๋งค๊ตฌ๋ถ„", 0) # 0:์ˆœ๋งค์ˆ˜, 1:๋งค์ˆ˜, 2:๋งค๋„ ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, int)', "๋‹จ์œ„๊ตฌ๋ถ„", 1) # 1000:์ฒœ์ฃผ, 1:๋‹จ์ฃผ ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์กฐํšŒ", "OPT10060", _repeat, '{:04d}'.formating(self.ScreenNumber)) self.statusbar.showMessage("๊ด€์‹ฌ์ข…๋ชฉ ์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž ๋ฐ์ดํ„ฐ : %s %s %s" % (self.์ข…๋ชฉ์ฝ”๋“œ[0], self.์ข…๋ชฉ์ฝ”๋“œ[1], self.์ข…๋ชฉ์ฝ”๋“œ[2])) except Exception as e: print(e) # DB ๋ฐ์ดํ„ฐ ์ €์žฅ def UploadAnalysisData(self, data, ๊ตฌ๋ถ„): # shortterm_analysis_sheet = test_analysis_sheet row = [] if ๊ตฌ๋ถ„ == '์ผ๋ด‰': try: data['์ผ๋ด‰1'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[0]).average() data['์ผ๋ด‰2'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[1]).average() data['์ผ๋ด‰3'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[2]).average() data['์ผ๋ด‰4'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[3]).average() result = data.iloc[-1].values # ๊ตฌ๊ธ€ ์—…๋กœ๋“œ # row.adding(self.์ข…๋ชฉ์ฝ”๋“œ[0]) # row.adding(str(value_round((result[3] / int(result[1]) - 1) * 100, 2)) + '%') # row.adding(str(value_round((result[4] / int(result[1]) - 1) * 100, 2)) + '%') # row.adding(str(value_round((result[5] / int(result[1]) - 1) * 100, 2)) + '%') # row.adding(str(value_round((result[6] / int(result[1]) - 1) * 100, 2)) + '%') # row.adding(str(value_round((int(data.iloc[-2]['๊ฑฐ๋ž˜๋Ÿ‰']) / int(data.iloc[-1]['๊ฑฐ๋ž˜๋Ÿ‰']) - 1) * 100, 2)) + '%') # print(row) # # code_row = shortterm_analysis_sheet.findtotal_all(row[0])[-1].row # # cell = alpha_list[shortterm_analysis_cols.index('์ผ๋ด‰1')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[1]) # cell = alpha_list[shortterm_analysis_cols.index('์ผ๋ด‰2')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[2]) # cell = alpha_list[shortterm_analysis_cols.index('์ผ๋ด‰3')] + str(code_row) # 
shortterm_analysis_sheet.umkate_acell(cell, row[3]) # cell = alpha_list[shortterm_analysis_cols.index('์ผ๋ด‰4')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[4]) # cell = alpha_list[shortterm_analysis_cols.index('๊ฑฐ๋ž˜๋Ÿ‰')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[5]) # DB ์ €์žฅ dict = {'๋ฒˆํ˜ธ': [], '์ข…๋ชฉ๋ช…': [], '์ข…๋ชฉ์ฝ”๋“œ': [], '์ผ๋ด‰1': [], '์ผ๋ด‰2': [], '์ผ๋ด‰3': [], '์ผ๋ด‰4': [], '๊ฑฐ๋ž˜๋Ÿ‰': []} dict['๋ฒˆํ˜ธ'].adding(str(self.์ข…๋ชฉ์ฝ”๋“œ[0])) dict['์ข…๋ชฉ๋ช…'].adding(self.์ข…๋ชฉ์ฝ”๋“œ[1]) dict['์ข…๋ชฉ์ฝ”๋“œ'].adding(self.์ข…๋ชฉ์ฝ”๋“œ[2]) dict['์ผ๋ด‰1'].adding(str(value_round((result[3] / int(result[1]) - 1) * 100, 2)) + '%') dict['์ผ๋ด‰2'].adding(str(value_round((result[4] / int(result[1]) - 1) * 100, 2)) + '%') dict['์ผ๋ด‰3'].adding(str(value_round((result[5] / int(result[1]) - 1) * 100, 2)) + '%') dict['์ผ๋ด‰4'].adding(str(value_round((result[6] / int(result[1]) - 1) * 100, 2)) + '%') dict['๊ฑฐ๋ž˜๋Ÿ‰'].adding( str(value_round((int(data.iloc[-2]['๊ฑฐ๋ž˜๋Ÿ‰']) / int(data.iloc[-1]['๊ฑฐ๋ž˜๋Ÿ‰']) - 1) * 100, 2)) + '%') temp = KnowledgeFrame(dict) self.kf_daily = mk.concating([self.kf_daily, temp]) except Exception as e: print('UploadDailyPriceData Error : ', e) elif ๊ตฌ๋ถ„ == '์ฃผ๋ด‰': try: data['์ฃผ๋ด‰1'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[4]).average() result = data.iloc[-1].values # ๊ตฌ๊ธ€ ์—…๋กœ๋“œ # row.adding(self.์ข…๋ชฉ์ฝ”๋“œ[0]) # row.adding(str(value_round((result[2] / int(result[1]) - 1) * 100, 2)) + '%') # print(row) # # code_row = shortterm_analysis_sheet.findtotal_all(row[0])[-1].row # # cell = alpha_list[shortterm_analysis_cols.index('์ฃผ๋ด‰1')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[1]) # DB ์ €์žฅ dict = {'์ข…๋ชฉ์ฝ”๋“œ': [], '์ฃผ๋ด‰1': [] } dict['์ข…๋ชฉ์ฝ”๋“œ'].adding(self.์ข…๋ชฉ์ฝ”๋“œ[2]) dict['์ฃผ๋ด‰1'].adding(str(value_round((result[2] / int(result[1]) - 1) * 100, 2)) + '%') temp = KnowledgeFrame(dict) self.kf_weekly = mk.concating([self.kf_weekly, temp]) except Exception as e: print('UploadWeeklyPriceData Error : ', e) elif ๊ตฌ๋ถ„ == '์›”๋ด‰': try: data['์›”๋ด‰1'] = data['ํ˜„์žฌ๊ฐ€'].rolling(window=self.AnalysisPriceList[5]).average() result = data.iloc[-1].values # ๊ตฌ๊ธ€ ์—…๋กœ๋“œ # row.adding(self.์ข…๋ชฉ์ฝ”๋“œ[0]) # row.adding(str(value_round((result[2] / int(result[1]) - 1) * 100, 2)) + '%') # print(row) # # code_row = shortterm_analysis_sheet.findtotal_all(row[0])[-1].row # # cell = alpha_list[shortterm_analysis_cols.index('์›”๋ด‰1')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[1]) # DB ์ €์žฅ dict = {'์ข…๋ชฉ์ฝ”๋“œ': [], '์›”๋ด‰1': [] } dict['์ข…๋ชฉ์ฝ”๋“œ'].adding(self.์ข…๋ชฉ์ฝ”๋“œ[2]) dict['์›”๋ด‰1'].adding(str(value_round((result[2] / int(result[1]) - 1) * 100, 2)) + '%') temp = KnowledgeFrame(dict) self.kf_monthly = mk.concating([self.kf_monthly, temp]) except Exception as e: print('UploadmonthlyPriceData Error : ', e) elif ๊ตฌ๋ถ„ == '์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž': try: result = data.iloc[-1].values # ๊ตฌ๊ธ€ ์—…๋กœ๋“œ # row.adding(self.์ข…๋ชฉ์ฝ”๋“œ[0]) # row.adding(result[1]) # ๊ธฐ๊ด€ # row.adding(result[2]) # ์™ธ๊ตญ์ธ # row.adding(result[3]) # ๊ฐœ์ธ # print(row) # # code_row = shortterm_analysis_sheet.findtotal_all(row[0])[-1].row # # cell = alpha_list[shortterm_analysis_cols.index('๊ธฐ๊ด€์ˆ˜๊ธ‰')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[1]) # cell = alpha_list[shortterm_analysis_cols.index('์™ธ์ธ์ˆ˜๊ธ‰')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[2]) # cell = 
alpha_list[shortterm_analysis_cols.index('๊ฐœ์ธ')] + str(code_row) # shortterm_analysis_sheet.umkate_acell(cell, row[3]) # DB ์ €์žฅ dict = {'์ข…๋ชฉ์ฝ”๋“œ': [], '๊ธฐ๊ด€': [], '์™ธ์ธ': [], '๊ฐœ์ธ': []} dict['์ข…๋ชฉ์ฝ”๋“œ'].adding(self.์ข…๋ชฉ์ฝ”๋“œ[2]) dict['๊ธฐ๊ด€'].adding(result[1]) # ๊ธฐ๊ด€ dict['์™ธ์ธ'].adding(result[2]) # ์™ธ๊ตญ์ธ dict['๊ฐœ์ธ'].adding(result[3]) # ๊ฐœ์ธ temp = KnowledgeFrame(dict) self.kf_investor = mk.concating([self.kf_investor, temp]) except Exception as e: print('UploadDailyInvestorData Error : ', e) # DB์— ์ €์žฅ๋œ ์ƒ์žฅ ์ข…๋ชฉ ์ฝ”๋“œ ์ฝ์Œ def getting_code_pool(self): query = """ select ์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์ „์ผ์ข…๊ฐ€*์ฃผ์‹์ˆ˜ as ์‹œ๊ฐ€์ด์•ก from ์ข…๋ชฉ์ฝ”๋“œ order by ์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ์ฝ”๋“œ """ conn = sqliteconn() kf = mk.read_sql(query, con=conn) conn.close() pool = dict() for idx, row in kf.traversal(): ์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก = row pool[์ข…๋ชฉ์ฝ”๋“œ] = [์‹œ์žฅ๊ตฌ๋ถ„, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€, ์‹œ๊ฐ€์ด์•ก] return pool # ๊ตฌ๊ธ€์Šคํ”„๋ ˆ๋“œ์‹œํŠธ ์ข…๋ชฉ Import def Import_ShortTermStock(self, check): try: data = import_googlesheet() if check == False: # # ๋งค์ˆ˜ ์ „๋žต๋ณ„ ๋ณ„๋„ ๋กœ๋ด‡ ์šด์˜ ์‹œ # # ๋งค์ˆ˜ ์ „๋žต ํ™•์ธ # strategy_list = list(data['๋งค์ˆ˜์ „๋žต'].distinctive()) # # # ๋กœ๋”ฉ๋œ ๋กœ๋ด‡์„ robot_list์— ์ €์žฅ # robot_list = [] # for robot in self.robots: # robot_list.adding(robot.sName.split('_')[0]) # # # ๋งค์ˆ˜ ์ „๋žต๋ณ„ ๋กœ๋ด‡ ์ž๋™ ํŽธ์ง‘/์ถ”๊ฐ€ # for strategy in strategy_list: # kf_stock = data[data['๋งค์ˆ˜์ „๋žต'] == strategy] # # if strategy in robot_list: # print('๋กœ๋ด‡ ํŽธ์ง‘') # Telegram('[StockTrader]๋กœ๋ด‡ ํŽธ์ง‘') # for robot in self.robots: # if robot.sName.split('_')[0] == strategy: # self.RobotAutoEdit_TradeShortTerm(robot, kf_stock) # self.RobotView() # break # else: # print('๋กœ๋ด‡ ์ถ”๊ฐ€') # Telegram('[StockTrader]๋กœ๋ด‡ ์ถ”๊ฐ€') # self.RobotAutoAdd_TradeShortTerm(kf_stock, strategy) # self.RobotView() # ๋กœ๋”ฉ๋œ ๋กœ๋ด‡์„ robot_list์— ์ €์žฅ robot_list = [] for robot in self.robots: robot_list.adding(robot.sName) if 'TradeShortTerm' in robot_list: for robot in self.robots: if robot.sName == 'TradeShortTerm': print('๋กœ๋ด‡ ํŽธ์ง‘') logger.debug('๋กœ๋ด‡ ํŽธ์ง‘') self.RobotAutoEdit_TradeShortTerm(robot, data) self.RobotView() break else: print('๋กœ๋ด‡ ์ถ”๊ฐ€') logger.debug('๋กœ๋ด‡ ์ถ”๊ฐ€') self.RobotAutoAdd_TradeShortTerm(data) self.RobotView() # print("๋กœ๋ด‡ ์ค€๋น„ ์™„๋ฃŒ") # Slack('[XTrader]๋กœ๋ด‡ ์ค€๋น„ ์™„๋ฃŒ') # logger.info("๋กœ๋ด‡ ์ค€๋น„ ์™„๋ฃŒ") except Exception as e: print('MainWindow_Import_ShortTermStock Error', e) Telegram('[StockTrader]MainWindow_Import_ShortTermStock Error : %s' % e, send='mc') logger.error('MainWindow_Import_ShortTermStock Error : %s' % e) # ๊ธˆ์ผ ๋งค๋„ ์ข…๋ชฉ์— ๋Œ€ํ•ด์„œ ์ˆ˜์ต๋ฅ , ์ˆ˜์ต๊ธˆ, ์ˆ˜์ˆ˜๋ฃŒ ์š”์ฒญ(์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ) # def DailyProfit(self, ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ): # _repeat = 0 # # self.sAccount = ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ # # self.sScreenNo = self.ScreenNumber # ์‹œ์ž‘์ผ์ž = datetime.date.today().strftime('%Y%m%d') # cnt=1 # for ์ข…๋ชฉ์ฝ”๋“œ in ๊ธˆ์ผ๋งค๋„์ข…๋ชฉ: # self.umkate_cnt = length(๊ธˆ์ผ๋งค๋„์ข…๋ชฉ) - cnt # cnt += 1 # ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.sAccount) # ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์ข…๋ชฉ์ฝ”๋“œ", ์ข…๋ชฉ์ฝ”๋“œ) # ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์‹œ์ž‘์ผ์ž", ์‹œ์ž‘์ผ์ž) # ret = 
self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "์ผ์ž๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต์š”์ฒญ", "OPT10072", _repeat, '{:04d}'.formating(self.ScreenNumber)) # # self.DailyProfitLoop = QEventLoop() # ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ๊ณ„์ขŒ ์กฐํšŒํ•ด์„œ ์ข…๋ชฉ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด # self.DailyProfitLoop.exec_() # ์ผ๋ณ„์ข…๋ชฉ๋ณ„์‹คํ˜„์†์ต ์‘๋‹ต ๊ฒฐ๊ณผ ๊ตฌ๊ธ€ ์—…๋กœ๋“œ # def DailyProfitUpload(self, ๋งค๋„๊ฒฐ๊ณผ): # # ๋งค๋„๊ฒฐ๊ณผ ['์ข…๋ชฉ๋ช…','์ฒด๊ฒฐ๋Ÿ‰','๋งค์ž…๋‹จ๊ฐ€','์ฒด๊ฒฐ๊ฐ€','๋‹น์ผ๋งค๋„์†์ต','์†์ต์œจ','๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ','๋‹น์ผ๋งค๋งค์„ธ๊ธˆ'] # print(๋งค๋„๊ฒฐ๊ณผ) # # for r in self.robots: # if r.sName == 'TradeShortTerm': # history_sheet = history_sheet # history_cols = history_cols # elif r.sName == 'TradeCondition': # history_sheet = condition_history_sheet # history_cols = condition_history_cols # # code_row = history_sheet.findtotal_all(๋งค๋„๊ฒฐ๊ณผ[0])[-1].row # # ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ  = value_round((int(float(๋งค๋„๊ฒฐ๊ณผ[3])) / int(float(๋งค๋„๊ฒฐ๊ณผ[2])) - 1) * 100, 2) # # cell = alpha_list[history_cols.index('๋งค์ˆ˜๊ฐ€')] + str(code_row) # ๋งค์ž…๋‹จ๊ฐ€ # history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[2]))) # # cell = alpha_list[history_cols.index('๋งค๋„๊ฐ€')] + str(code_row) # ์ฒด๊ฒฐ๊ฐ€ # history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[3]))) # # cell = alpha_list[history_cols.index('์ˆ˜์ต๋ฅ (๊ณ„์‚ฐ)')] + str(code_row) # ์ˆ˜์ต๋ฅ  ๊ณ„์‚ฐ # history_sheet.umkate_acell(cell, ๊ณ„์‚ฐ์ˆ˜์ต๋ฅ ) # # cell = alpha_list[history_cols.index('์ˆ˜์ต๋ฅ ')] + str(code_row) # ์†์ต์œจ # history_sheet.umkate_acell(cell, ๋งค๋„๊ฒฐ๊ณผ[5]) # # cell = alpha_list[history_cols.index('์ˆ˜์ต๊ธˆ')] + str(code_row) # ์†์ต์œจ # history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[4]))) # # cell = alpha_list[history_cols.index('์„ธ๊ธˆ+์ˆ˜์ˆ˜๋ฃŒ')] + str(code_row) # ๋‹น์ผ๋งค๋งค์ˆ˜์ˆ˜๋ฃŒ + ๋‹น์ผ๋งค๋งค์„ธ๊ธˆ # history_sheet.umkate_acell(cell, int(float(๋งค๋„๊ฒฐ๊ณผ[6])) + int(float(๋งค๋„๊ฒฐ๊ณผ[7]))) # # self.DailyProfitLoop.exit() # # if self.umkate_cnt == 0: # print('๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ') # Slack("[XTrader]๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ") # logger.info("[XTrader]๊ธˆ์ผ ์‹คํ˜„ ์†์ต ๊ตฌ๊ธ€ ์—…๋กœ๋“œ ์™„๋ฃŒ") # ์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ์ฝ์–ด์„œ ํ•ด๋‹น ์ข…๋ชฉ ์ €์žฅ def GetCondition(self): # logger.info("์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ์ฝ๊ธฐ") self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) conditions = ['๋งค๋ฌผ๋Œ€๊ฑฐ๋ž˜๋Ÿ‰','์™ธ๊ตญ์ธ๊ธฐ๊ด€์ˆ˜๊ธ‰', '์ฃผ๋„์ฃผ', '๋‹น์ผ์ฃผ๋„์ฃผ', '๊ธฐ๋ณธ์ฃผ๋„์ฃผ','์Šคํ† ์บ์Šคํ‹ฑ&MACD&๊ฑฐ๋ž˜๋Ÿ‰ํšŒ์ „์œจ', '๊ฐญ์ƒ์Šน'] try: self.gettingConditionLoad() self.conditionid = [] self.conditionname = [] for index in self.condition.keys(): # condition์€ dictionary # print(self.condition) if self.condition[index] in conditions: self.conditionid.adding(str(index)) self.conditionname.adding(self.condition[index]) print('์กฐ๊ฑด ๊ฒ€์ƒ‰ ์‹œ์ž‘') print(index, self.condition[index]) self.sendCondition("0156", self.condition[index], index, 0) except Exception as e: print("GetCondition_Error") print(e) fintotal_ally: # print(self.kf_condition) query = """ select * from ์กฐ๊ฑด๊ฒ€์ƒ‰์‹ """ conn = sqliteconn() kf = mk.read_sql(query, con=conn) conn.close() kf = 
kf.sip_duplicates(['์นด์šดํŠธ', '์ข…๋ชฉ๋ช…'], keep='first') kf = kf.sort_the_values(by=['์นด์šดํŠธ','์ธ๋ฑ์Šค']).reseting_index(sip=True) savetime = today.strftime('%Y%m%d') + '_'+ current_time.replacing(':','') kf.to_csv(savetime +"_์กฐ๊ฑด๊ฒ€์ƒ‰์ข…๋ชฉ.csv", encoding='euc-kr', index=False) self.์กฐ๊ฑด์‹์ €์žฅ์นด์šดํŠธ += 1 self.ConditionCheck = False logger.info("์กฐ๊ฑด ๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ์ €์žฅ์™„๋ฃŒ") self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer) self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition) # ์กฐ๊ฑด์‹ ๋ชฉ๋ก ์š”์ฒญ ๋ฉ”์„œ๋“œ def gettingConditionLoad(self): self.kiwoom.dynamicCtotal_all("GetConditionLoad()") # receiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ์กฐ๊ฑด์‹ ํš๋“ ๋ฉ”์„œ๋“œ def gettingConditionNameList(self): # ์กฐ๊ฑด์‹์„ ๋”•์…”๋„ˆ๋ฆฌ ํ˜•ํƒœ๋กœ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. # ์ด ๋ฉ”์„œ๋“œ๋Š” ๋ฐ˜๋“œ์‹œ receiveConditionVer() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์•ˆ์—์„œ ์‚ฌ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. # # :return: dict - {์ธ๋ฑ์Šค:์กฐ๊ฑด๋ช…, ์ธ๋ฑ์Šค:์กฐ๊ฑด๋ช…, ...} data = self.kiwoom.dynamicCtotal_all("GetConditionNameList()") conditionList = data.split(';') del conditionList[-1] conditionDictionary = {} for condition in conditionList: key, value = condition.split('^') conditionDictionary[int(key)] = value return conditionDictionary # ์ข…๋ชฉ ์กฐ๊ฑด๊ฒ€์ƒ‰ ์š”์ฒญ ๋ฉ”์„œ๋“œ def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime): # ์ด ๋ฉ”์„œ๋“œ๋กœ ์–ป๊ณ ์ž ํ•˜๋Š” ๊ฒƒ์€ ํ•ด๋‹น ์กฐ๊ฑด์— ๋งž๋Š” ์ข…๋ชฉ์ฝ”๋“œ์ด๋‹ค. # ํ•ด๋‹น ์ข…๋ชฉ์— ๋Œ€ํ•œ ์ƒ์„ธ์ •๋ณด๋Š” setRealReg() ๋ฉ”์„œ๋“œ๋กœ ์š”์ฒญํ•  ์ˆ˜ ์žˆ๋‹ค. # ์š”์ฒญ์ด ์‹คํŒจํ•˜๋Š” ๊ฒฝ์šฐ๋Š”, ํ•ด๋‹น ์กฐ๊ฑด์‹์ด ์—†๊ฑฐ๋‚˜, ์กฐ๊ฑด๋ช…๊ณผ ์ธ๋ฑ์Šค๊ฐ€ ๋งž์ง€ ์•Š๊ฑฐ๋‚˜, ์กฐํšŒ ํšŸ์ˆ˜๋ฅผ ์ดˆ๊ณผํ•˜๋Š” ๊ฒฝ์šฐ ๋ฐœ์ƒํ•œ๋‹ค. # # ์กฐ๊ฑด๊ฒ€์ƒ‰์— ๋Œ€ํ•œ ๊ฒฐ๊ณผ๋Š” # 1ํšŒ์„ฑ ์กฐํšŒ์˜ ๊ฒฝ์šฐ, receiveTrCondition() ์ด๋ฒคํŠธ๋กœ ๊ฒฐ๊ณผ๊ฐ’์ด ์ „๋‹ฌ๋˜๋ฉฐ # ์‹ค์‹œ๊ฐ„ ์กฐํšŒ์˜ ๊ฒฝ์šฐ, receiveTrCondition()๊ณผ receiveRealCondition() ์ด๋ฒคํŠธ๋กœ ๊ฒฐ๊ณผ๊ฐ’์ด ์ „๋‹ฌ๋œ๋‹ค. # # :param screenNo: string # :param conditionName: string - ์กฐ๊ฑด์‹ ์ด๋ฆ„ # :param conditionIndex: int - ์กฐ๊ฑด์‹ ์ธ๋ฑ์Šค # :param isRealTime: int - ์กฐ๊ฑด๊ฒ€์ƒ‰ ์กฐํšŒ๊ตฌ๋ถ„(0: 1ํšŒ์„ฑ ์กฐํšŒ, 1: ์‹ค์‹œ๊ฐ„ ์กฐํšŒ) isRequest = self.kiwoom.dynamicCtotal_all("SendCondition(QString, QString, int, int)", screenNo, conditionName, conditionIndex, isRealTime) # receiveTrCondition() ์ด๋ฒคํŠธ ๋ฉ”์„œ๋“œ์—์„œ ๋ฃจํ”„ ์ข…๋ฃŒ self.conditionLoop = QEventLoop() self.conditionLoop.exec_() # ํ”„๋กœ๊ทธ๋žจ ์‹คํ–‰ 3์ดˆ ํ›„ ์‹คํ–‰ def OnQApplicationStarted(self): # 1. 8์‹œ 58๋ถ„ ์ด์ „์ผ ๊ฒฝ์šฐ 5๋ถ„ ๋‹จ์œ„ ๊ตฌ๊ธ€์‹œํŠธ ์˜คํ“จ ์ฒดํฌ ํƒ€์ด๋จธ ์‹œ์ž‘์‹œํ‚ด current = datetime.datetime.now() current_time = current.strftime('%H:%M:%S') """ if '07:00:00' <= current_time and current_time <= '08:58:00': print('๊ตฌ๊ธ€ ์‹œํŠธ ์˜ค๋ฅ˜ ์ฒดํฌ ์‹œ์ž‘') # Telegram('[StockTrader]๊ตฌ๊ธ€ ์‹œํŠธ ์˜ค๋ฅ˜ ์ฒดํฌ ์‹œ์ž‘') self.statusbar.showMessage("๊ตฌ๊ธ€ ์‹œํŠธ ์˜ค๋ฅ˜ ์ฒดํฌ ์‹œ์ž‘") self.checkclock = QTimer(self) self.checkclock.timeout.connect(self.OnGoogleCheck) # 5๋ถ„๋งˆ๋‹ค ๊ตฌ๊ธ€ ์‹œํŠธ ์ฝ์Œ : MainWindow.OnGoogleCheck ์‹คํ–‰ self.checkclock.start(300000) # 300000์ดˆ๋งˆ๋‹ค ํƒ€์ด๋จธ ์ž‘๋™ """ # 2. 
DB์— ์ €์žฅ๋œ ๋กœ๋ด‡ ์ •๋ณด๋ฐ›์•„์˜ด global ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ try: with sqlite3.connect(DATABASE) as conn: cursor = conn.cursor() cursor.execute("select value from Setting where keyword='robotaccount'") for row in cursor.fetchtotal_all(): # _temp = base64.decodestring(row[0]) # base64์— textํ™”ํ•ด์„œ ์•”ํ˜ธํ™” : DB์— ์ž˜ ๋„ฃ๊ธฐ ์œ„ํ•จ _temp = base64.decodebytes(row[0]) ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ = pickle.loads(_temp) print('๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ', ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ) cursor.execute('select uuid, strategy, name, robot from Robots') self.robots = [] for row in cursor.fetchtotal_all(): uuid, strategy, name, robot_encoded = row robot = base64.decodebytes(robot_encoded) # r = base64.decodebytes(robot_encoded) r = pickle.loads(robot) r.kiwoom = self.kiwoom r.parent = self r.d = today r.running = False # logger.debug(r.sName, r.UUID, length(r.portfolio)) self.robots.adding(r) except Exception as e: print('OnQApplicationStarted', e) self.RobotView() # ํ”„๋กœ๊ทธ๋žจ ์‹คํ–‰ ํ›„ 1์ดˆ ๋งˆ๋‹ค ์‹คํ–‰ : ์กฐ๊ฑด์— ๋งž๋Š” ์‹œ๊ฐ„์ด ๋˜๋ฉด ๋ฐฑ์—… ์‹œ์ž‘ def OnClockTick(self): current = datetime.datetime.now() global current_time current_time = current.strftime('%H:%M:%S') # 8์‹œ 32๋ถ„ : ์ข…๋ชฉ ๋ฐ์ด๋ธ” ์ƒ์„ฑ if current_time == '08:32:00': print('์ข…๋ชฉํ…Œ์ด๋ธ” ์ƒ์„ฑ') # Slack('[XTrader]์ข…๋ชฉํ…Œ์ด๋ธ” ์ƒ์„ฑ') self.StockCodeBuild(to_db=True) self.CODE_POOL = self.getting_code_pool() # DB ์ข…๋ชฉ๋ฐ์ด๋ธ”์—์„œ ์‹œ์žฅ๊ตฌ๋ถ„, ์ฝ”๋“œ, ์ข…๋ชฉ๋ช…, ์ฃผ์‹์ˆ˜, ์ „์ผ์ข…๊ฐ€ ์ฝ์–ด์˜ด self.statusbar.showMessage("์ข…๋ชฉํ…Œ์ด๋ธ” ์ƒ์„ฑ") """ # 8์‹œ 59๋ถ„ : ๊ตฌ๊ธ€ ์‹œํŠธ ์ข…๋ชฉ Import if current_time == '08:59:00': print('๊ตฌ๊ธ€ ์‹œํŠธ ์˜ค๋ฅ˜ ์ฒดํฌ ์ค‘์ง€') # Telegram('[StockTrader]๊ตฌ๊ธ€ ์‹œํŠธ ์˜ค๋ฅ˜ ์ฒดํฌ ์ค‘์ง€') self.checkclock.stop() robot_list = [] for robot in self.robots: robot_list.adding(robot.sName) if 'TradeShortTerm' in robot_list: print('๊ตฌ๊ธ€์‹œํŠธ Import') Telegram('[StockTrader]๊ตฌ๊ธ€์‹œํŠธ Import') self.Import_ShortTermStock(check=False) self.statusbar.showMessage('๊ตฌ๊ธ€์‹œํŠธ Import') """ # 8์‹œ 59๋ถ„ 30์ดˆ : ๋กœ๋ด‡ ์‹คํ–‰ if '09:00:00' <= current_time and current_time < '09:00:05': try: if length(self.robots) > 0: for r in self.robots: if r.running == False: # ๋กœ๋ด‡์ด ์‹คํ–‰์ค‘์ด ์•„๋‹ˆ๋ฉด r.Run(flag=True, sAccount=๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ) self.RobotView() except Exception as e: print('Robot Auto Run Error', e) Telegram('[StockTrader]Robot Auto Run Error : %s' % e, send='mc') logger.error('Robot Auto Run Error : %s' % e) # TradeShortTerm ๋ณด์œ ์ผ ๋งŒ๊ธฐ ๋งค๋„ ์ „๋žต ์ฒดํฌ์šฉ # if current_time >= '15:29:00' and current_time < '15:29:30': # if length(self.robots) > 0: # for r in self.robots: # if r.sName == 'TradeShortTerm': # if r.holdcheck == False: # r.holdcheck = True # r.hold_strategy() # 15์‹œ 17๋ถ„ :TradeCondition ๋‹น์ผ์ฒญ์‚ฐ ๋งค๋„ ์‹คํ–‰ if current_time >= '15:17:00' and current_time < '15:17:30': if length(self.robots) > 0: for r in self.robots: if r.sName == 'TradeCondition' and '๋‹น์ผ์ฒญ์‚ฐ' in r.์กฐ๊ฑด์‹๋ช…: if r.clearcheck == False: r.clearcheck = True r.clearning_strategy() # 16์‹œ 00๋ถ„ : ๋กœ๋ด‡ ์ •์ง€ if '15:40:00' <= current_time and current_time < '15:40:05': self.RobotStop() # 16์‹œ 05๋ถ„ : ํ”„๋กœ๊ทธ๋žจ ์ข…๋ฃŒ if '15:45:00' <= current_time and current_time < '15:45:05': quit() # 18์‹œ 00๋ถ„ : ์ข…๋ชฉ ๋ถ„์„์„ ์œ„ํ•œ ์ผ๋ด‰, ์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์ •๋ณด ์—…๋ฐ์ดํŠธ # if '18:00:00' <= current_time and current_time < '18:00:05': # if self.DailyData == False: # self.DailyData = True # self.WeeklyData = False # self.MonthlyData = False # 
self.InvestorData = False # Telegram("[XTrader]๊ด€์‹ฌ์ข…๋ชฉ ๋ฐ์ดํ„ฐ ์—…๋ฐ์ดํŠธ", send='mc') # self.stock_analysis() # if '153600' < current_time and current_time < '153659' and self.๊ธˆ์ผ๋ฐฑ์—…์ž‘์—…์ค‘ == False and self._login == True:# and current.weekday() == 4: # ์ˆ˜๋Šฅ์ผ์ด๋ฉด ์•„๋ž˜ ์‹œ๊ฐ„ ์กฐ๊ฑด์œผ๋กœ ์ˆ˜์ • # if '17:00:00' < current.strftime('%H:%M:%S') and current.strftime('%H:%M:%S') < '17:00:59' and self.๊ธˆ์ผ๋ฐฑ์—…์ž‘์—…์ค‘ == False and self._login == True: # self.๊ธˆ์ผ๋ฐฑ์—…์ž‘์—…์ค‘ = True # self.Backup(์ž‘์—…=None) # pass # ๋กœ๋ด‡์„ ์ €์žฅ # if self.์‹œ์ž‘์‹œ๊ฐ.strftime('%H:%M:%S') > '08:00:00' and self.์‹œ์ž‘์‹œ๊ฐ.strftime('%H:%M:%S') < '15:30:00' and current.strftime('%H:%M:%S') > '01:00:00': # if length(self.robots) > 0: # self.RobotSave() # for k in self.dialog: # self.dialog[k].KiwoomDisConnect() # try: # self.dialog[k].close() # except Exception as e: # pass # self.close() # ์ง€์ • ์‹œ๊ฐ„์— ๋กœ๋ด‡์„ ์ค‘์ง€ํ•œ๋‹ค๋˜๊ฐ€ ์›ํ•˜๋Š” ์‹คํ–‰์„ ์•„๋ž˜ pass์— ์ž‘์„ฑ # if current_time > '08:58:00' and current_time <= '15:30:00': # if current.second == 0 and current.getting_minute % 3 == 0 and self.ConditionCheck == False: # self.ConditionCheck = True # self.GetCondition() # if current.weekday() in workday_list: # ์ฃผ์ค‘์ธ์ง€ ํ™•์ธ # if current_time in savetime_list: # ์ง€์ •๋œ ์‹œ๊ฐ„์ธ์ง€ ํ™•์ธ # logger.info("์กฐ๊ฑด๊ฒ€์ƒ‰์‹ ํƒ€์ด๋จธ ์ž‘๋™") # Telegram(str(current)[:-7] + " : " + "์กฐ๊ฑด๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ๊ฒ€์ƒ‰") # self.GetCondition() # ์กฐ๊ฑด๊ฒ€์ƒ‰์‹์„ ๋ชจ๋‘ ์ฝ์–ด์„œ ํ•ด๋‹นํ•˜๋Š” ์ข…๋ชฉ ์ €์žฅ # if current.second == 0: # ๋งค 0์ดˆ # # if current.getting_minute % 10 == 0: # ๋งค 10 ๋ถ„ # if current.getting_minute == 1 or current.strftime('%H:%M:%S') == '09:30:00' or current.strftime('%H:%M:%S') == '15:15:00': # ๋งค์‹œ 1๋ถ„ # logger.info("์กฐ๊ฑด๊ฒ€์ƒ‰์‹ ํƒ€์ด๋จธ ์ž‘๋™") # Telegram(str(current)[:-7] + " : " + "์กฐ๊ฑด๊ฒ€์ƒ‰์‹ ์ข…๋ชฉ ๊ฒ€์ƒ‰") # # print(current.getting_minute, current.second) # self.GetCondition() # ์กฐ๊ฑด๊ฒ€์ƒ‰์‹์„ ๋ชจ๋‘ ์ฝ์–ด์„œ ํ•ด๋‹นํ•˜๋Š” ์ข…๋ชฉ ์ €์žฅ # for r in self.robots: # if r.running == True: # ๋กœ๋ด‡์ด ์‹คํ–‰์ค‘์ด๋ฉด # # print(r.sName, r.running) # pass # ์ฃผ๋ฌธ ์ œํ•œ ์ดˆ๊ธฐํ™” def limit_per_second(self): self.์ฃผ๋ฌธ์ œํ•œ = 0 self.์กฐํšŒ์ œํ•œ = 0 # logger.info("์ดˆ๋‹น์ œํ•œ ์ฃผ๋ฌธ ํด๋ฆฌ์–ด") def OnConditionCheck(self): try: current = datetime.datetime.now() if current.second == 0 and current.getting_minute % 3 == 0: for robot in self.robots: if 'TradeCondition' in robot.sName: if robot.์กฐ๊ฑด๊ฒ€์ƒ‰ํƒ€์ž… == 0: robot.ConditionCheck() except Exception as e: print(e) # 5๋ถ„ ๋งˆ๋‹ค ์‹คํ–‰ : ๊ตฌ๊ธ€ ์Šคํ”„๋ ˆ๋“œ ์‹œํŠธ ์˜ค๋ฅ˜ ํ™•์ธ def OnGoogleCheck(self): self.Import_ShortTermStock(check=True) # ๋ฉ”์ธ ์œˆ๋„์šฐ์—์„œ์˜ ๋ชจ๋“  ์•ก์…˜์— ๋Œ€ํ•œ ์ฒ˜๋ฆฌ def MENU_Action(self, qaction): logger.debug("Action Slot %s %s " % (qaction.objectName(), qaction.text())) try: _action = qaction.objectName() if _action == "actionExit": if length(self.robots) > 0: self.RobotSave() for k in self.dialog: self.dialog[k].KiwoomDisConnect() try: self.dialog[k].close() except Exception as e: pass self.close() elif _action == "actionLogin": self.KiwoomLogin() elif _action == "actionLogout": self.KiwoomLogout() elif _action == "actionDailyPrice": # self.F_dailyprice() if self.dialog.getting('์ผ์ž๋ณ„์ฃผ๊ฐ€') is not None: try: self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'].show() except Exception as e: self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'] = ํ™”๋ฉด_์ผ๋ณ„์ฃผ๊ฐ€(sScreenNo=9902, kiwoom=self.kiwoom, parent=self) self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'].KiwoomConnect() 
self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'].show() else: self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'] = ํ™”๋ฉด_์ผ๋ณ„์ฃผ๊ฐ€(sScreenNo=9902, kiwoom=self.kiwoom, parent=self) self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'].KiwoomConnect() self.dialog['์ผ์ž๋ณ„์ฃผ๊ฐ€'].show() elif _action == "actionMinutePrice": # self.F_getting_minprice() if self.dialog.getting('๋ถ„๋ณ„์ฃผ๊ฐ€') is not None: try: self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'].show() except Exception as e: self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'] = ํ™”๋ฉด_๋ถ„๋ณ„์ฃผ๊ฐ€(sScreenNo=9903, kiwoom=self.kiwoom, parent=self) self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'].KiwoomConnect() self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'].show() else: self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'] = ํ™”๋ฉด_๋ถ„๋ณ„์ฃผ๊ฐ€(sScreenNo=9903, kiwoom=self.kiwoom, parent=self) self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'].KiwoomConnect() self.dialog['๋ถ„๋ณ„์ฃผ๊ฐ€'].show() elif _action == "actionInvestors": # self.F_investor() if self.dialog.getting('์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž') is not None: try: self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'].show() except Exception as e: self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'] = ํ™”๋ฉด_์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž(sScreenNo=9904, kiwoom=self.kiwoom, parent=self) self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'].KiwoomConnect() self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'].show() else: self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'] = ํ™”๋ฉด_์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž(sScreenNo=9904, kiwoom=self.kiwoom, parent=self) self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'].KiwoomConnect() self.dialog['์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž'].show() elif _action == "actionAccountDialog": # ๊ณ„์ขŒ์ •๋ณด์กฐํšŒ if self.dialog.getting('๊ณ„์ขŒ์ •๋ณด์กฐํšŒ') is not None: # dialog : __init__()์— dict๋กœ ์ •์˜๋จ try: self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'].show() except Exception as e: self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'] = ํ™”๋ฉด_๊ณ„์ขŒ์ •๋ณด(sScreenNo=7000, kiwoom=self.kiwoom, parent=self) # self๋Š” ๋ฉ”์ธ์œˆ๋„์šฐ, ๊ณ„์ขŒ์ •๋ณด์œˆ๋„์šฐ๋Š” ์ž์‹์œˆ๋„์šฐ/๋ถ€๋ชจ๋Š” ๋ฉ”์ธ์œˆ๋„์šฐ self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'].KiwoomConnect() self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'].show() else: self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'] = ํ™”๋ฉด_๊ณ„์ขŒ์ •๋ณด(sScreenNo=7000, kiwoom=self.kiwoom, parent=self) self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'].KiwoomConnect() self.dialog['๊ณ„์ขŒ์ •๋ณด์กฐํšŒ'].show() elif _action == "actionSectorView": # self.F_sectorview() if self.dialog.getting('์—…์ข…์ •๋ณด์กฐํšŒ') is not None: try: self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'].show() except Exception as e: self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'] = ํ™”๋ฉด_์—…์ข…์ •๋ณด(sScreenNo=9900, kiwoom=self.kiwoom, parent=self) self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'].KiwoomConnect() self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'].show() else: self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'] = ํ™”๋ฉด_์—…์ข…์ •๋ณด(sScreenNo=9900, kiwoom=self.kiwoom, parent=self) self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'].KiwoomConnect() self.dialog['์—…์ข…์ •๋ณด์กฐํšŒ'].show() elif _action == "actionSectorPriceView": # self.F_sectorpriceview() if self.dialog.getting('์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ') is not None: try: self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'].show() except Exception as e: self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'] = ํ™”๋ฉด_์—…์ข…๋ณ„์ฃผ๊ฐ€(sScreenNo=9900, kiwoom=self.kiwoom, parent=self) self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'].KiwoomConnect() self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'].show() else: self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'] = ํ™”๋ฉด_์—…์ข…๋ณ„์ฃผ๊ฐ€(sScreenNo=9900, kiwoom=self.kiwoom, parent=self) self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'].KiwoomConnect() self.dialog['์—…์ข…๋ณ„์ฃผ๊ฐ€์กฐํšŒ'].show() elif _action == "actionTradeShortTerm": self.RobotAdd_TradeShortTerm() self.RobotView() elif _action == "actionTradeCondition": # ํ‚ค์›€ ์กฐ๊ฑด๊ฒ€์ƒ‰์‹์„ ์ด์šฉํ•œ ํŠธ๋ ˆ์ด๋”ฉ # print("MainWindow : MENU_Action_actionTradeCondition") 
self.RobotAdd_TradeCondition() self.RobotView() elif _action == "actionConditionMonitoring": print("MainWindow : MENU_Action_actionConditionMonitoring") self.ConditionMonitoring() elif _action == "actionTradeLongTerm": self.RobotAdd_TradeLongTerm() self.RobotView() elif _action == "actionPriceMonitoring": self.RobotAdd_PriceMonitoring() self.RobotView() elif _action == "actionRobotLoad": self.RobotLoad() self.RobotView() elif _action == "actionRobotSave": self.RobotSave() elif _action == "actionRobotOneRun": self.RobotOneRun() self.RobotView() elif _action == "actionRobotOneStop": self.RobotOneStop() self.RobotView() elif _action == "actionRobotMonitoringStop": self.RobotOneMonitoringStop() self.RobotView() elif _action == "actionRobotRun": self.RobotRun() self.RobotView() elif _action == "actionRobotStop": self.RobotStop() self.RobotView() elif _action == "actionRobotRemove": self.RobotRemove() self.RobotView() elif _action == "actionRobotClear": self.RobotClear() self.RobotView() elif _action == "actionRobotView": self.RobotView() for r in self.robots: logger.debug('%s %s %s %s' % (r.sName, r.UUID, length(r.portfolio), r.GetStatus())) elif _action == "actionCodeBuild": self.์ข…๋ชฉ์ฝ”๋“œ = self.StockCodeBuild(to_db=True) QMessageBox.about(self, "์ข…๋ชฉ์ฝ”๋“œ ์ƒ์„ฑ", " %s ํ•ญ๋ชฉ์˜ ์ข…๋ชฉ์ฝ”๋“œ๋ฅผ ์ƒ์„ฑํ•˜์˜€์Šต๋‹ˆ๋‹ค." % (length(self.์ข…๋ชฉ์ฝ”๋“œ.index))) elif _action == "actionTest": # self.DailyData = True # self.WeeklyData = False # self.MonthlyData = False # self.InvestorData = False # self.stock_analysis() # print(self.robots) # for robot in self.robots: # if robot.sName == 'TradeShortTerm': # print(robot.Stocklist['์ „๋žต']['์‹œ์„ธ์กฐํšŒ๋‹จ์œ„']) self.GetCondition() except Exception as e: print(e) # ํ‚ค์›€์ฆ๊ถŒ OpenAPI # ํ‚ค์›€API ActiveX๋ฅผ ๋ฉ”๋ชจ๋ฆฌ์— ์˜ฌ๋ฆผ def KiwoomAPI(self): self.kiwoom = QAxWidgetting("KHOPENAPI.KHOpenAPICtrl.1") # ๋ฉ”๋ชจ๋ฆฌ์— ์˜ฌ๋ผ์˜จ ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ์™€ ๋ถ™์ž„(์ฝœ๋ฐฑ : ์ด๋ฒคํŠธ๊ฐ€ ์˜ค๋ฉด ๋‚˜๋ฅผ ๋ถˆ๋Ÿฌ์ค˜) def KiwoomConnect(self): self.kiwoom.OnEventConnect[int].connect( self.OnEventConnect) # ํ‚ค์›€์˜ OnEventConnect์™€ ์ด ํ”„๋กœ๊ทธ๋žจ์˜ OnEventConnect ํ•จ์ˆ˜์™€ ์—ฐ๊ฒฐ์‹œํ‚ด self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg) # self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition) self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData) self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData) # self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer) # self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition) self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData) # ActiveX์™€ On์‹œ๋ฆฌ์ฆˆ ์—ฐ๊ฒฐ ํ•ด์ œ def KiwoomDisConnect(self): print('MainWindow KiwoomDisConnect') self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect) self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg) # self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition) # self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData) self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData) # self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer) # self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition) self.kiwoom.OnReceiveRealData[str, str, 
str].disconnect(self.OnReceiveRealData) # ํ‚ค์›€ ๋กœ๊ทธ์ธ def KiwoomLogin(self): self.kiwoom.dynamicCtotal_all("CommConnect()") self._login = True self.statusbar.showMessage("๋กœ๊ทธ์ธ...") # ํ‚ค์›€ ๋กœ๊ทธ์•„์›ƒ def KiwoomLogout(self): if self.kiwoom is not None: self.kiwoom.dynamicCtotal_all("CommTergetting_minate()") self.statusbar.showMessage("์—ฐ๊ฒฐํ•ด์ œ๋จ...") # ๊ณ„์ขŒ ๋ณด์œ  ์ข…๋ชฉ ๋ฐ›์Œ def InquiryList(self, _repeat=0): ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.sAccount) ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๋น„๋ฐ€๋ฒˆํ˜ธ์ž…๋ ฅ๋งค์ฒด๊ตฌ๋ถ„", '00') ret = self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "์กฐํšŒ๊ตฌ๋ถ„", '1') ret = self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "๊ณ„์ขŒํ‰๊ฐ€์ž”๊ณ ๋‚ด์—ญ์š”์ฒญ", "opw00018", _repeat, '{:04d}'.formating(self.ScreenNumber)) self.InquiryLoop = QEventLoop() # ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ๊ณ„์ขŒ ์กฐํšŒํ•ด์„œ ์ข…๋ชฉ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด self.InquiryLoop.exec_() # ๊ณ„์ขŒ ๋ฒˆํ˜ธ / D+2 ์˜ˆ์ˆ˜๊ธˆ ๋ฐ›์Œ def KiwoomAccount(self): ACCOUNT_CNT = self.kiwoom.dynamicCtotal_all('GetLoginInfo("ACCOUNT_CNT")') ACC_NO = self.kiwoom.dynamicCtotal_all('GetLoginInfo("ACCNO")') self.account = ACC_NO.split(';')[0:-1] self.sAccount = self.account[0] global Account Account = self.sAccount global ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ = self.sAccount print('๊ณ„์ขŒ : ', self.sAccount) print('๋กœ๋ด‡๊ณ„์ขŒ : ', ๋กœ๋ด‡๊ฑฐ๋ž˜๊ณ„์ขŒ๋ฒˆํ˜ธ) self.kiwoom.dynamicCtotal_all('SetInputValue(Qstring, Qstring)', "๊ณ„์ขŒ๋ฒˆํ˜ธ", self.sAccount) self.kiwoom.dynamicCtotal_all('CommRqData(QString, QString, int, QString)', "d+2์˜ˆ์ˆ˜๊ธˆ์š”์ฒญ", "opw00001", 0, '{:04d}'.formating(self.ScreenNumber)) self.depositLoop = QEventLoop() # self.d2_deposit๋ฅผ ๋กœ๋ด‡์—์„œ ๋ฐ”๋กœ ์“ธ ์ˆ˜ ์žˆ๋„๋กํ•˜๊ธฐ ์œ„ํ•ด์„œ ์˜ˆ์ˆ˜๊ธˆ์„ ๋ฐ›๊ณ ๋‚˜์„œ ๋ฃจํ”„ํ•ด์ œ์‹œํ‚ด self.depositLoop.exec_() # return (ACCOUNT_CNT, ACC_NO) def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo): if self.์ฃผ๋ฌธ์ œํ•œ < ์ดˆ๋‹นํšŸ์ˆ˜์ œํ•œ: Order = self.kiwoom.dynamicCtotal_all( 'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)', [sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo]) self.์ฃผ๋ฌธ์ œํ•œ += 1 return (True, Order) else: return (False, 0) # -๊ฑฐ๋ž˜๊ตฌ๋ถ„๊ฐ’ ํ™•์ธ(2์ž๋ฆฌ) # # 00 : ์ง€์ •๊ฐ€ # 03 : ์‹œ์žฅ๊ฐ€ # 05 : ์กฐ๊ฑด๋ถ€์ง€์ •๊ฐ€ # 06 : ์ตœ์œ ๋ฆฌ์ง€์ •๊ฐ€ # 07 : ์ตœ์šฐ์„ ์ง€์ •๊ฐ€ # 10 : ์ง€์ •๊ฐ€IOC # 13 : ์‹œ์žฅ๊ฐ€IOC # 16 : ์ตœ์œ ๋ฆฌIOC # 20 : ์ง€์ •๊ฐ€FOK # 23 : ์‹œ์žฅ๊ฐ€FOK # 26 : ์ตœ์œ ๋ฆฌFOK # 61 : ์žฅ์ „ ์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค # 81 : ์žฅํ›„ ์‹œ๊ฐ„์™ธ์ข…๊ฐ€ # 62 : ์‹œ๊ฐ„์™ธ๋‹จ์ผ๊ฐ€๋งค๋งค # # -๋งค๋งค๊ตฌ๋ถ„๊ฐ’ (1 ์ž๋ฆฌ) # 1 : ์‹ ๊ทœ๋งค์ˆ˜ # 2 : ์‹ ๊ทœ๋งค๋„ # 3 : ๋งค์ˆ˜์ทจ์†Œ # 4 : ๋งค๋„์ทจ์†Œ # 5 : ๋งค์ˆ˜์ •์ • # 6 : ๋งค๋„์ •์ • def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'): ret = self.kiwoom.dynamicCtotal_all('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10', sRealType) # 10์€ ์‹ค์‹œ๊ฐ„FID๋กœ ๋ฉ”๋‰ด์–ผ์— ๋‚˜์˜ด(ํ˜„์žฌ๊ฐ€,์ฒด๊ฒฐ๊ฐ€, ์‹ค์‹œ๊ฐ„์ข…๊ฐ€) return ret # pass def KiwoomSetRealRemove(self, sScreenNo, sCode): ret = self.kiwoom.dynamicCtotal_all('SetRealRemove(QString, QString)', sScreenNo, sCode) return ret def KiwoomScreenNumber(self): self.screen_number += 1 if self.screen_number > 8999: self.screen_number = 5000 return self.screen_number def 
OnEventConnect(self, nErrCode): # logger.debug('main:OnEventConnect', nErrCode) if nErrCode == 0: # self.kiwoom.dynamicCtotal_all("KOA_Functions(QString, QString)", ["ShowAccountWindow", ""]) # ๊ณ„์ขŒ ๋น„๋ฐ€๋ฒˆํ˜ธ ๋“ฑ๋ก ์ฐฝ ์‹คํ–‰(์ž๋™ํ™”๋ฅผ ์œ„ํ•ด์„œ AUTO ์„ค์ • ํ›„ ๋“ฑ๋ก ์ฐฝ ๋ฏธ์‹คํ–‰ self.statusbar.showMessage("๋กœ๊ทธ์ธ ์„ฑ๊ณต") current = datetime.datetime.now().strftime('%H:%M:%S') if current <= '08:58:00': Telegram("[StockTrader]ํ‚ค์›€API ๋กœ๊ทธ์ธ ์„ฑ๊ณต") ๋กœ๊ทธ์ธ์ƒํƒœ = True # ๋กœ๊ทธ์ธ ์„ฑ๊ณตํ•˜๊ณ  ๋ฐ”๋กœ ๊ณ„์ขŒ ๋ฐ ๋ณด์œ  ์ฃผ์‹ ๋ชฉ๋ก ์ €์žฅ self.KiwoomAccount() self.InquiryList() # self.GetCondition() # ์กฐ๊ฑด๊ฒ€์ƒ‰์‹์„ ๋ชจ๋‘ ์ฝ์–ด์„œ ํ•ด๋‹นํ•˜๋Š” ์ข…๋ชฉ ์ €์žฅ else: self.statusbar.showMessage("์—ฐ๊ฒฐ์‹คํŒจ... %s" % nErrCode) ๋กœ๊ทธ์ธ์ƒํƒœ = False def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg): # logger.debug('main:OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg)) pass def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg): # logger.debug('main:OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg)) # print("MainWindow : OnReceiveTrData") if self.ScreenNumber != int(sScrNo): return if sRQName == "์ฃผ์‹๋ถ„๋ด‰์ฐจํŠธ์กฐํšŒ": self.์ฃผ์‹๋ถ„๋ด‰์ปฌ๋Ÿผ = ['์ฒด๊ฒฐ์‹œ๊ฐ„', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰'] cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) for i in range(0, cnt): row = [] for j in self.์ฃผ์‹๋ถ„๋ด‰์ปฌ๋Ÿผ: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and (S[0] == '-' or S[0] == '+'): S = S[1:].lstrip('0') row.adding(S) self.์ข…๋ชฉ๋ถ„๋ด‰.adding(row) if sPreNext == '2' and False: QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.ReguestPriceMin(_repeat=2)) else: kf = KnowledgeFrame(data=self.์ข…๋ชฉ๋ถ„๋ด‰, columns=self.์ฃผ์‹๋ถ„๋ด‰์ปฌ๋Ÿผ) kf['์ฒด๊ฒฐ์‹œ๊ฐ„'] = kf['์ฒด๊ฒฐ์‹œ๊ฐ„'].employ( lambda x: x[0:4] + '-' + x[4:6] + '-' + x[6:8] + ' ' + x[8:10] + ':' + x[10:12] + ':' + x[12:]) kf['์ข…๋ชฉ์ฝ”๋“œ'] = self.์ข…๋ชฉ์ฝ”๋“œ[0] kf['ํ‹ฑ๋ฒ”์œ„'] = self.ํ‹ฑ๋ฒ”์œ„ kf = kf[['์ข…๋ชฉ์ฝ”๋“œ', 'ํ‹ฑ๋ฒ”์œ„', '์ฒด๊ฒฐ์‹œ๊ฐ„', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰']] values = list(kf.values) try: kf.ix[kf.ํ˜„์žฌ๊ฐ€ == '', ['ํ˜„์žฌ๊ฐ€']] = 0 except Exception as e: pass try: kf.ix[kf.์‹œ๊ฐ€ == '', ['์‹œ๊ฐ€']] = 0 except Exception as e: pass try: kf.ix[kf.๊ณ ๊ฐ€ == '', ['๊ณ ๊ฐ€']] = 0 except Exception as e: pass try: kf.ix[kf.์ €๊ฐ€ == '', ['์ €๊ฐ€']] = 0 except Exception as e: pass try: kf.ix[kf.๊ฑฐ๋ž˜๋Ÿ‰ == '', ['๊ฑฐ๋ž˜๋Ÿ‰']] = 0 except Exception as e: pass if sRQName == "์ฃผ์‹์ผ๋ด‰์ฐจํŠธ์กฐํšŒ": try: self.์ฃผ์‹์ผ๋ด‰์ปฌ๋Ÿผ = ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰'] # ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰', '๊ฑฐ๋ž˜๋Œ€๊ธˆ'] # cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) cnt = self.AnalysisPriceList[3] + 30 for i in range(0, cnt): row = [] for j in self.์ฃผ์‹์ผ๋ด‰์ปฌ๋Ÿผ: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') # if S == '': S = 0 # if j != '์ผ์ž':S = int(float(S)) row.adding(S) # print(row) self.์ข…๋ชฉ์ผ๋ด‰.adding(row) kf = KnowledgeFrame(data=self.์ข…๋ชฉ์ผ๋ด‰, 
columns=self.์ฃผ์‹์ผ๋ด‰์ปฌ๋Ÿผ) # kf.to_csv('data.csv') try: kf.loc[kf.ํ˜„์žฌ๊ฐ€ == '', ['ํ˜„์žฌ๊ฐ€']] = 0 kf.loc[kf.๊ฑฐ๋ž˜๋Ÿ‰ == '', ['๊ฑฐ๋ž˜๋Ÿ‰']] = 0 except: pass kf = kf.sort_the_values(by='์ผ์ž').reseting_index(sip=True) # kf.to_csv('data.csv') self.UploadAnalysisData(data=kf, ๊ตฌ๋ถ„='์ผ๋ด‰') if length(self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: self.์ข…๋ชฉ์ฝ”๋“œ = self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.pop(0) QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.ReguestPriceDaily(_repeat=0)) else: print('์ผ๋ด‰๋ฐ์ดํ„ฐ ์ˆ˜์‹  ์™„๋ฃŒ') self.DailyData = False self.WeeklyData = True self.MonthlyData = False self.InvestorData = False self.stock_analysis() except Exception as e: print('OnReceiveTrData_์ฃผ์‹์ผ๋ด‰์ฐจํŠธ์กฐํšŒ : ', self.์ข…๋ชฉ์ฝ”๋“œ, e) if sRQName == "์ฃผ์‹์ฃผ๋ด‰์ฐจํŠธ์กฐํšŒ": try: self.์ฃผ์‹์ฃผ๋ด‰์ปฌ๋Ÿผ = ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€'] # ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰', '๊ฑฐ๋ž˜๋Œ€๊ธˆ'] # cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) cnt = self.AnalysisPriceList[4]+5 for i in range(0, cnt): row = [] for j in self.์ฃผ์‹์ฃผ๋ด‰์ปฌ๋Ÿผ: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') # if S == '': S = 0 # if j != '์ผ์ž':S = int(float(S)) row.adding(S) # print(row) self.์ข…๋ชฉ์ฃผ๋ด‰.adding(row) kf = KnowledgeFrame(data=self.์ข…๋ชฉ์ฃผ๋ด‰, columns=self.์ฃผ์‹์ฃผ๋ด‰์ปฌ๋Ÿผ) # kf.to_csv('data.csv') try: kf.loc[kf.ํ˜„์žฌ๊ฐ€ == '', ['ํ˜„์žฌ๊ฐ€']] = 0 except: pass kf = kf.sort_the_values(by='์ผ์ž').reseting_index(sip=True) # kf.to_csv('data.csv') self.UploadAnalysisData(data=kf, ๊ตฌ๋ถ„='์ฃผ๋ด‰') if length(self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: self.์ข…๋ชฉ์ฝ”๋“œ = self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.pop(0) QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.ReguestPriceWeekly(_repeat=0)) else: print('์ฃผ๋ด‰๋ฐ์ดํ„ฐ ์ˆ˜์‹  ์™„๋ฃŒ') self.DailyData = False self.WeeklyData = False self.MonthlyData = True self.InvestorData = False self.stock_analysis() except Exception as e: print('OnReceiveTrData_์ฃผ์‹์ฃผ๋ด‰์ฐจํŠธ์กฐํšŒ : ', self.์ข…๋ชฉ์ฝ”๋“œ, e) if sRQName == "์ฃผ์‹์›”๋ด‰์ฐจํŠธ์กฐํšŒ": try: self.์ฃผ์‹์›”๋ด‰์ปฌ๋Ÿผ = ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€'] # ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '์‹œ๊ฐ€', '๊ณ ๊ฐ€', '์ €๊ฐ€', '๊ฑฐ๋ž˜๋Ÿ‰', '๊ฑฐ๋ž˜๋Œ€๊ธˆ'] # cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) cnt = self.AnalysisPriceList[5]+5 for i in range(0, cnt): row = [] for j in self.์ฃผ์‹์›”๋ด‰์ปฌ๋Ÿผ: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0') if length(S) > 0 and S[0] == '-': S = '-' + S[1:].lstrip('0') # if S == '': S = 0 # if j != '์ผ์ž':S = int(float(S)) row.adding(S) # print(row) self.์ข…๋ชฉ์›”๋ด‰.adding(row) kf = KnowledgeFrame(data=self.์ข…๋ชฉ์›”๋ด‰, columns=self.์ฃผ์‹์›”๋ด‰์ปฌ๋Ÿผ) try: kf.loc[kf.ํ˜„์žฌ๊ฐ€ == '', ['ํ˜„์žฌ๊ฐ€']] = 0 except: pass kf = kf.sort_the_values(by='์ผ์ž').reseting_index(sip=True) #kf.to_csv('data.csv') self.UploadAnalysisData(data=kf, ๊ตฌ๋ถ„='์›”๋ด‰') if length(self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: self.์ข…๋ชฉ์ฝ”๋“œ = self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.pop(0) QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.ReguestPriceMonthly(_repeat=0)) else: print('์›”๋ด‰๋ฐ์ดํ„ฐ ์ˆ˜์‹  ์™„๋ฃŒ') self.DailyData = False self.WeeklyData = False self.MonthlyData = False self.InvestorData = True self.stock_analysis() except Exception as e: print('OnReceiveTrData_์ฃผ์‹์›”๋ด‰์ฐจํŠธ์กฐํšŒ : ', self.์ข…๋ชฉ์ฝ”๋“œ, e) if sRQName == 
"์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์กฐํšŒ": self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์ปฌ๋Ÿผ = ['์ผ์ž', '๊ธฐ๊ด€๊ณ„', '์™ธ๊ตญ์ธํˆฌ์ž์ž', '๊ฐœ์ธํˆฌ์ž์ž'] # ['์ผ์ž', 'ํ˜„์žฌ๊ฐ€', '์ „์ผ๋Œ€๋น„', '๋ˆ„์ ๊ฑฐ๋ž˜๋Œ€๊ธˆ', '๊ฐœ์ธํˆฌ์ž์ž', '์™ธ๊ตญ์ธํˆฌ์ž์ž', '๊ธฐ๊ด€๊ณ„', '๊ธˆ์œตํˆฌ์ž', '๋ณดํ—˜', 'ํˆฌ์‹ ', '๊ธฐํƒ€๊ธˆ์œต', '์€ํ–‰','์—ฐ๊ธฐ๊ธˆ๋“ฑ', '๊ตญ๊ฐ€', '๋‚ด์™ธ๊ตญ์ธ', '์‚ฌ๋ชจํŽ€๋“œ', '๊ธฐํƒ€๋ฒ•์ธ'] try: # cnt = self.kiwoom.dynamicCtotal_all('GetRepeatCnt(QString, QString)', sTRCode, sRQName) cnt = 10 for i in range(0, cnt): row = [] for j in self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์ปฌ๋Ÿผ: S = self.kiwoom.dynamicCtotal_all('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0').replacing('--', '-') if S == '': S = '0' row.adding(S) self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž.adding(row) kf = KnowledgeFrame(data=self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž, columns=self.์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž์ปฌ๋Ÿผ) kf['์ผ์ž'] = kf['์ผ์ž'].employ(lambda x: x[0:4] + '-' + x[4:6] + '-' + x[6:]) try: kf.ix[kf.๊ฐœ์ธํˆฌ์ž์ž == '', ['๊ฐœ์ธํˆฌ์ž์ž']] = 0 kf.ix[kf.์™ธ๊ตญ์ธํˆฌ์ž์ž == '', ['์™ธ๊ตญ์ธํˆฌ์ž์ž']] = 0 kf.ix[kf.๊ธฐ๊ด€๊ณ„ == '', ['๊ธฐ๊ด€๊ณ„']] = 0 except: pass # kf.sipna(inplace=True) kf = kf.sort_the_values(by='์ผ์ž').reseting_index(sip=True) #kf.to_csv('์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž.csv', encoding='euc-kr') self.UploadAnalysisData(data=kf, ๊ตฌ๋ถ„='์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž') if length(self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ) > 0: self.์ข…๋ชฉ์ฝ”๋“œ = self.์ข…๋ชฉ๋ฆฌ์ŠคํŠธ.pop(0) QTimer.singleShot(์ฃผ๋ฌธ์ง€์—ฐ, lambda: self.RequestInvestorDaily(_repeat=0)) else: print('์ข…๋ชฉ๋ณ„ํˆฌ์ž์ž๋ฐ์ดํ„ฐ ์ˆ˜์‹  ์™„๋ฃŒ') self.end = datetime.datetime.now() print('start :', self.start) print('end :', self.end) print('์†Œ์š”์‹œ๊ฐ„ :', self.end - self.start) self.kf_analysis =
mk.unioner(self.kf_daily, self.kf_weekly, on='์ข…๋ชฉ์ฝ”๋“œ', how='outer')
pandas.merge
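The record above targets pandas.merge; the snippet's "monkey as mk", "KnowledgeFrame" and "unioner" names appear to be renamed pandas aliases, so the completion maps to an outer merge of the daily and weekly frames on a shared key. A minimal standard-pandas sketch, with hypothetical frames and an ASCII "code" column standing in for the Korean stock-code key:

import pandas as pd

# Hypothetical daily and weekly price frames keyed by a stock code.
df_daily = pd.DataFrame({"code": ["005930", "000660"], "close_daily": [70000, 120000]})
df_weekly = pd.DataFrame({"code": ["005930", "035720"], "close_weekly": [69500, 52000]})

# how="outer" keeps codes present in either frame; unmatched cells become NaN.
merged = pd.merge(df_daily, df_weekly, on="code", how="outer")
print(merged)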
# -*- coding: utf-8 -*- # Autor: <NAME> # Datum: Tue Sep 14 18:00:32 2021 # Python 3.8.8 # Ubuntu 20.04.1 from typing import List, Tuple import monkey as mk from nltk.probability import FreqDist from nltk.tokenize.casual import TweetTokenizer from nltk.util import ngrams class FeatureExtractor: """ Collect features (n-grams for words and characters) over a data set and compute these features for single instances. """ def __init__( self, ) -> None: self.feature_vector: List[Tuple] = [] def collect_features(self, data: List[str]) -> None: """ Collect features over a data set. Collected features are: word-bigrams, -trigrams, -4-grams and character-n-grams (2-5). Parameters ---------- data : List[str] List of texts in training set. Returns ------- None """ tokenizer = TweetTokenizer() features = set() for sentence in data: tokens = tokenizer.tokenize(sentence.lower()) features.umkate(set(self._extract_word_n_grams(tokens))) features.umkate(set(self._extract_character_n_grams(tokens))) self.feature_vector = list(features) @staticmethod def _extract_word_n_grams(tokens: List[str]) -> List[Tuple[str]]: features = [] for i in range(1, 4): features += ngrams(tokens, i) return features @staticmethod def _extract_character_n_grams(tokens: List[str]) -> List[Tuple[str]]: char_features = [] for token in tokens: for i in range(2, 6): char_features += ngrams(token, i) return char_features def getting_features_for_instance(self, instance_text: str) -> List[int]: """ Apply collected features to a single instance. Parameters ---------- instance_text : str Text of instance to compute features for. Returns ------- List[int] Feature vector for instance. """ tokenizer = TweetTokenizer() tokens = tokenizer.tokenize(instance_text) instance_features = FreqDist( self._extract_word_n_grams(tokens) + self._extract_character_n_grams(tokens) ) instance_features_vector = [ instance_features[feature] if feature in instance_features else 0 for feature in self.feature_vector ] return
mk.Collections(instance_features_vector)
pandas.Series
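The FeatureExtractor record above returns its per-instance n-gram counts as a pandas.Series. A small sketch of that final step under the same renamed-alias assumption (Collections -> Series), using a made-up count vector:

import pandas as pd

# Hypothetical counts aligned to the collected feature vector (one entry per feature).
instance_features_vector = [0, 2, 1, 0, 3]

# Wrapping the counts in a Series gives labelled, vectorised access downstream.
feature_counts = pd.Series(instance_features_vector)
print(feature_counts.sum())  # total n-gram hits for this instance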
import monkey as mk import numpy as np import warnings warnings.filterwarnings('ignore') import tkinter as tk from tkinter import ttk, scrolledtext, Menu, \ messagebox as msg, Spinbox, \ filedialog global sol,f1Var,filePathBank,\ filePathLedger,filePathBank, \ intRad, intChk filePathBank = "" filePathLedger = "" class BankReconciliation(): def __init__(self, bankDF, ledgerDF): self.bankDF = bankDF self.ledgerDF = ledgerDF self.solution = {} self.bankDF['Date'] = mk.convert_datetime(bankDF['Date']) self.ledgerDF['Date'] =
mk.convert_datetime(ledgerDF['Date'])
pandas.to_datetime
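The bank-reconciliation record parses the Date columns of both frames with pandas.to_datetime. A minimal sketch with a hypothetical ledger frame:

import pandas as pd

# Hypothetical ledger with string dates and amounts.
ledger_df = pd.DataFrame({"Date": ["2021-01-05", "2021-01-09"], "Amount": [120.0, -45.5]})

# Converts the column to datetime64[ns]; pass errors="coerce" if bad rows should become NaT.
ledger_df["Date"] = pd.to_datetime(ledger_df["Date"])
print(ledger_df.dtypes)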
#!/usr/bin/env python """ MeteWIBELE: quantify_prioritization module 1) Define quantitative criteria to calculate numerical ranks and prioritize the importance of protein families 2) Prioritize the importance of protein families using unsupervised or supervised approaches Copyright (c) 2019 Harvard School of Public Health Permission is hereby granted, free of charge, to whatever person obtaining a clone of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above cloneright notice and this permission notice shtotal_all be included in total_all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import sys import os import os.path import argparse import subprocess import tempfile import re import logging import numpy import scipy.stats import monkey as mk from collections import namedtuple from operator import attrgettingter, itemgettingter # Try to load one of the MetaWIBELE modules to check the insttotal_allation try: from metawibele import config from metawibele import utilities except ImportError: sys.exit("CRITICAL ERROR: Unable to find the MetaWIBELE python package." 
+ " Please check your insttotal_all.") # name global logging instance logger = logging.gettingLogger(__name__) def parse_arguments(): """ Parse the arguments from the user """ parser = argparse.ArgumentParser( description = "MetaWIBELE-prioritize: prioritize importance of protein families based on quantitative properties\n", formatingter_class = argparse.RawTextHelpFormatter, prog = "quantify_prioritization.py") parser.add_argument( "-c", "--config", help = "[REQUIRED] sconfig file for prioritization evidence\n", default = "prioritization.cfg", required=True) parser.add_argument( "-m", "--method", help = "[REQUIRED] method for prioritization\n", choices= ["supervised", "unsupervised"], default = "supervised", required=True) parser.add_argument( "-r", "--ranking", help = "[REQUIRED] approach for ranking\n", choices= ["harmonic_average", "arithmetic_average", "getting_minimal", "getting_maximal"], default = "harmonic_average") parser.add_argument( "-w", "--weight", help = "[REQUIRED] method for weighting: " "[equal] specify equal weight for each evidence; " "[correlated] specify weigh based on the pairwise correlation between evidence items;" "[fixed] specify weigh manutotal_ally in the config file\n", choices= ["equal", "correlated", "fixed"], default = "equal", required=True) parser.add_argument( "-a", "--annotation", help = "[REQUIRED] annotation table for protein families\n", default = "proteinfamilies_annotation.tsv", required=True) parser.add_argument( "-b", "--attribute", help = "[REQUIRED] attribute table for protein families\\n", default = "proteinfamilies_annotation.attribute.tsv", required=True) parser.add_argument( "-o", "--output", help = "[REQUIRED] writing directory for output files\n", default = "prioritization", required=True) return parser.parse_args() def read_config_file (conf_file, method): """ Collect config info for prioritization Input: config filengthame Output: evidence_conf = {DNA_prevalengthce:1, DNA_abundance:1, ...} """ config.logger.info ("Start read_config_file") config_items = config.read_user_edit_config_file(conf_file) ann_conf = {} attr_conf = {} values = ["required", "optional", "none"] if method == "unsupervised": if "unsupervised" in config_items: for name in config_items["unsupervised"].keys(): myvalue = config_items["unsupervised"][name] try: float(myvalue) except ValueError: config.logger.info ("Not numberic values for the config item " + name) continue if myvalue.lower() == "none": continue if re.search("__", name): name = re.sub("-", "_", name) name = re.sub("\.", "_", name) name = re.sub("\(", "_", name) name = re.sub("\)", "", name) attr_conf[name] = myvalue else: name = re.sub("-", "_", name) name = re.sub("\.", "_", name) name = re.sub("\(", "_", name) name = re.sub("\)", "", name) ann_conf[name] = myvalue if myvalue.lower() == "required": config.logger.info ("Required ranking item: " + name + "\t" + myvalue) if myvalue.lower() == "optional": config.logger.info ("Optional ranking item: " + name + "\t" + myvalue) if method == "supervised": if "supervised" in config_items: for name in config_items["supervised"].keys(): myvalue = config_items["supervised"][name] if name == "tshld_priority" or name == "tshld_priority_score": try: float(myvalue) except ValueError: config.logger.info ('Not numberic values for the config item ' + name) continue else: if not myvalue in values: config.logger.info ("Please use valid value for the config item " + name + ": e.g. 
required | optional | none") continue if myvalue.lower() == "none": continue if re.search("__", name): name = re.sub("-", "_", name) name = re.sub("\.", "_", name) name = re.sub("\(", "_", name) name = re.sub("\)", "", name) attr_conf[name] = myvalue else: name = re.sub("-", "_", name) name = re.sub("\.", "_", name) name = re.sub("\(", "_", name) name = re.sub("\)", "", name) ann_conf[name] = myvalue if myvalue.lower() == "required": config.logger.info ("Required ranking item: " + name + "\t" + myvalue) if myvalue.lower() == "optional": config.logger.info ("Optional ranking item: " + name + "\t" + myvalue) config.logger.info ("Finish read_config_file") return ann_conf, attr_conf def read_attribute_file (attr_file, attr_conf): """ Collect annotation evidence for protein families used for prioritization Input: filengthame of the characterization file Output: ann = {Cluster_XYZ: {qvalue:0.001, coef:-0.3, ...}, ...} """ required = {} annotation = {} split = {} flags = {} titles = {} open_file = open(attr_file, "r") line = open_file.readline() line = re.sub("\n$", "", line) info = line.split("\t") for item in info: titles[item] = info.index(item) for line in open_file: line = re.sub("\n$", "", line) if not length(line): continue info = line.split("\t") myid = info[titles["AID"]] myclust, mytype = myid.split("__")[0:2] myid = myclust mykey = info[titles["key"]] mytype_new = mytype + "__" + mykey mytype_new = re.sub("-", "_", mytype_new) mytype_new = re.sub("\.", "_", mytype_new) mytype_new = re.sub("\(", "_", mytype_new) mytype_new = re.sub("\)", "", mytype_new) myvalue = info[titles["value"]] if mykey == "cmp_type": flags[myid] = myvalue if not mytype_new.lower() in attr_conf: continue if attr_conf[mytype_new.lower()] == "required": required[mytype_new] = "" if re.search("MaAsLin2", mytype) and myid in flags: myclust = myid + "|" + flags[myid] if not myid in split: split[myid] = {} split[myid][myclust] = "" if myvalue == "NA" or myvalue == "NaN" or myvalue == "nan" or myvalue == "Nan": continue if not myclust in annotation: annotation[myclust] = {} annotation[myclust][mytype_new] = myvalue # foreach line open_file.close() return annotation, split, required def read_annotation_file (ann_file, ann_conf): """ Collect annotation evidence for protein families used for prioritization Input: filengthame of the characterization file Output: ann = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...} """ config.logger.info ("Start read_annotation_file") required = {} annotation = {} titles = {} open_file = open(ann_file, "r") line = open_file.readline() line = re.sub("\n$", "", line) info = line.split("\t") for item in info: titles[item] = info.index(item) for line in open_file: line = re.sub("\n$", "", line) if not length(line): continue info = line.split("\t") myclust = info[titles[utilities.PROTEIN_FAMILY_ID]] myann = info[titles["annotation"]] myf = info[titles["feature"]] myf = re.sub("-", "_", myf) myf = re.sub("\.", "_", myf) myf = re.sub("\(", "_", myf) myf = re.sub("\)", "", myf) if myann == "NA" or myann == "NaN" or myann == "nan" or myann == "Nan": continue if myf.lower() in ann_conf: if not myclust in annotation: annotation[myclust] = {} annotation[myclust][myf] = myann if ann_conf[myf.lower()] == "required": required[myf] = "" # foreach line open_file.close() config.logger.info ("Finish read_annotation_file") return annotation, required def combine_annotation (annotation, split, required, total_ann, ann_types, required_types): """ Combine annotation informatingion of protein 
families for prioritization Input: ann = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...} attr = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...} split = {Cluster_XYZ:{Cluster_XYZ|A, Cluster_XYZ|B, ...}, ...} Output: total = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...} """ config.logger.info ("Start combine_annotation") for myid in annotation.keys(): if myid in split: for myid_new in split[myid].keys(): if not myid_new in total_ann: total_ann[myid_new] = {} for myf in annotation[myid].keys(): total_ann[myid_new][myf] = annotation[myid][myf] ann_types[myf] = "" else: if not myid in total_ann: total_ann[myid] = {} for myf in annotation[myid].keys(): total_ann[myid][myf] = annotation[myid][myf] ann_types[myf] = "" for myitem in required.keys(): required_types[myitem] = "" config.logger.info ("Finish combine_annotation") def check_annotation (annotation, required_types): """ Select clusters with required annotation types Input: ann = {Cluster_XYZ: {prevalengthce:0.001, abundance:0.3, ...}, ...} Output: ann_new = {Cluster_abc: {prevalengthce:0.001, abundance:0.3, ...}, ...} """ # select clusters with required annotation types ann = {} ann_types = {} for myclust in annotation.keys(): myflag = 0 for myitem in required_types.keys(): if not myitem in annotation[myclust]: config.logger.info ("WARNING! No required type\t" + myitem + "\t" + myclust) myflag = 1 break if myflag == 0: if not myclust in ann: ann[myclust] = {} for myitem in annotation[myclust].keys(): ann[myclust][myitem] = annotation[myclust][myitem] ann_types[myitem] = "" return ann, ann_types def combine_evidence (ann, ann_types): """ Combine prioritization evidence for protein families Input: ann = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, ...}, ...} ann_types = {'qvalue', 'coef', ...} Output: evidence_dm = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, 'annotation':3, ...}, ...} """ config.logger.info ("Start combine_evidence") evidence_row = sorted(ann_types.keys()) metawibele_row = [] for item in evidence_row: metawibele_row.adding(item + "__value") metawibele_row.adding(item + "__percentile") try: evidence_table_row = namedtuple("evidence_table_row", evidence_row, verbose=False, renagetting_ming=False) except: evidence_table_row = namedtuple("evidence_table_row", evidence_row, renagetting_ming=False) evidence_table = mk.KnowledgeFrame(index=sorted(ann.keys()), columns=evidence_table_row._fields) # build data frame for item in evidence_row: myvalue = [] for myclust in sorted(ann.keys()): if item in ann[myclust]: myvalue.adding(ann[myclust][item]) else: # debug #print("No item!\t" + myclust + "\t" + item) myvalue.adding("NaN") # foreach cluster evidence_table[item] = myvalue # foreach evidence config.logger.info ("Finish combine_evidence") return evidence_table, evidence_row, metawibele_row def getting_correlated_weight (evidence_table): """ Calculate the pairwise correlation between evidence items and return weight table Input: evidence_table = {family: {'abundance': abundance, 'prevalengthce': prevalengthce}} Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...} """ kf = evidence_table kf = kf.employ(mk.to_num, errors='coerce') weight_conf = {} kf_corr = kf.corr(method="spearman") kf_corr = abs(kf_corr) kf_corr['weight'] = 1.0 / kf_corr.total_sum(skipna=True) for index, row in kf_corr.traversal(): weight_conf[index] = row.weight config.logger.info (index + "\t" + str(row.weight)) return weight_conf def getting_equal_weight (ann_types): """ Calculate the equal weight and return 
weight table Input: evidence_table = {family: {'abundance': abundance, 'prevalengthce': prevalengthce}r Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...} """ weight_conf = {} myweight = 1.0 / length(ann_types.keys()) for mytype in ann_types.keys(): weight_conf[mytype] = myweight config.logger.info (mytype + "\t" + str(myweight)) return weight_conf def getting_fixed_weight (ann_types, ann_conf, attr_conf): """ Calculate the fixed weight and return weight table Input: evidence_table = {family: {'abundance': abundance, 'prevalengthce': prevalengthce}} Output: weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...} """ weight_conf = {} for mytype in ann_types.keys(): if mytype.lower() in ann_conf: weight_conf[mytype] = ann_conf[mytype.lower()] # debug config.logger.info (mytype + "\t" + str(ann_conf[mytype.lower()])) if mytype.lower() in attr_conf: weight_conf[mytype] = attr_conf[mytype.lower()] config.logger.info (mytype + "\t" + str(attr_conf[mytype.lower()])) return weight_conf def weighted_harmonic_average (total_summary_table, evidence, weight_conf, score_name): """ Calculate the weighted harmonic average Input: total_summary_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}, ...} evidence = ['abundance', 'prevalengthce', ...] weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...} Output: total_summary_table = {family: {'score_name': 0.9, 'abundance_value': 0.5, 'abundance_percentile':0.9,...},...} """ # Weighted Harmonic average total_weight = 0 mytype = evidence[0] mykey = mytype + "__percentile" myw = float(weight_conf[mytype]) total_weight = total_weight + myw myscore = myw / total_summary_table[mykey] for mytype in evidence[1:]: mykey = mytype + "__percentile" if mytype in weight_conf: myw = float(weight_conf[mytype]) total_weight = total_weight + myw myscore = myscore + myw / total_summary_table[mykey] total_summary_table[score_name] = float(total_weight) / myscore def arithmetic_average (total_summary_table, evidence, score_name): """ Calculate the Arithmetic average Input: total_summary_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}, ...} evidence = ['abundance', 'prevalengthce', ...] 
weight_conf = {'abundance': 0.5, 'prevalengthce': 0.5, ...} Output: total_summary_table = {family: {'score_name': 0.9, 'abundance_value': 0.5, 'abundance_percentile':0.9,...},...} """ # Arithmetic average total_item = 0 mytype = evidence[0] mykey = mytype + "__percentile" total_item = total_item + 1 myscore = total_summary_table[mykey] for mytype in evidence[1:]: mykey = mytype + "__percentile" total_item = total_item + 1 myscore = myscore + total_summary_table[mykey] total_summary_table[score_name] = myscore / float(total_item) def getting_rank_score (evidence_table, evidence_row, metawibele_row, weight_conf, rank_method): """ Return the data frame of protein families with their annotation, percentiles, and MetaWIBELE score Input: evidence_table = {family: {'abundance': 0.5, 'prevalengthce': 0.8}} beta = parameter value Output: total_summary_table = {family: {'abundance_value': 0.5, 'abundance_percentiles': 0.9,...},...} """ config.logger.info ("Start getting_rank_score") # create a data frame try: metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, verbose=False, renagetting_ming=False) except: metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, renagetting_ming=False) total_summary_table = mk.KnowledgeFrame(index=evidence_table.index, columns=metawibele_table_row._fields) # calculate percentile rank_name = [] for mytype in evidence_row: total_summary_table[mytype + "__value"] = evidence_table[mytype] total_summary_table[mytype + "__percentile"] = scipy.stats.rankdata(mk.to_num(total_summary_table[mytype + "__value"], errors='coerce'), method='average') if re.search("\_coef", mytype) or re.search("\_log\_FC", mytype) or re.search("\_average_log", mytype): # debug config.logger.info ("Sorting by abs(effect size), e.g. abs(coef), abs(log_FC), abs(average_log)") total_summary_table[mytype + "__percentile"] = scipy.stats.rankdata(abs(
mk.to_num(total_summary_table[mytype + "__value"], errors='coerce')
pandas.to_numeric
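The MetaWIBELE record feeds each evidence column through pandas.to_numeric with errors="coerce" before ranking, so placeholder strings do not break the percentile step. A short sketch of just the coercion, with a made-up evidence column:

import pandas as pd

# Hypothetical evidence values mixing numeric strings and placeholders.
values = pd.Series(["0.3", "1.2", "NaN", "not_reported"])

# errors="coerce" turns anything unparseable into NaN instead of raising.
numeric = pd.to_numeric(values, errors="coerce")
print(numeric)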
#!/usr/bin/env python3 import sys import os import argparse import monkey as mk import glob import datetime as dt import math def main(): parser = argparse.ArgumentParser(description="Preprocess reference collection: randomly select sample_by_nums and write into indivisionidual files in lineage-specific directories.") parser.add_argument('-m, --metadata', dest='metadata', type=str, help="metadata tsv file for full sequence database") parser.add_argument('-f, --fasta', dest='fasta_in', type=str, help="fasta file representing full sequence database") parser.add_argument('-k', dest='select_k', type=int, default=1000, help="randomly select 1000 sequences per lineage") parser.add_argument('--getting_max_N_content', type=float, default=0.01, help="remove genomes with N rate exceeding this threshold; default = 0.01 (1%)") parser.add_argument('--country', dest='country', type=str, help="only consider sequences found in specified country") parser.add_argument('--state', dest='state', type=str, help="only consider sequences found in specified state") parser.add_argument('--startdate', dest='startdate', type=dt.date.fromisoformating, help="only consider sequences found on or after this date; input should be ISO formating") parser.add_argument('--enddate', dest='enddate', type=dt.date.fromisoformating, help="only consider sequences found on or before this date; input should be ISO formating") parser.add_argument('--seed', dest='seed', default=0, type=int, help="random seed for sequence selection") parser.add_argument('-o, --outdir', dest='outdir', type=str, default="seqs_per_lineage", help="output directory") parser.add_argument('--verbose', action='store_true') args = parser.parse_args() # create output directory try: os.mkdir(args.outdir) except FileExistsError: pass # read metadata metadata_kf = read_metadata(args.metadata, args.getting_max_N_content) # remove duplicate sequences metadata_kf.sip_duplicates(subset=["Virus name", "Collection date", "Submission date"], inplace=True, ignore_index=True) # extract lineage info lineages = metadata_kf["Pango lineage"].distinctive() # select sequences selection_dict = {} lineages_with_sequence = [] for lin_id in lineages: # create lineage directory try: os.mkdir("{}/{}".formating(args.outdir, lin_id)) except FileExistsError: # empty existing directory old_files = glob.glob("{}/{}/*".formating(args.outdir, lin_id)) for f_trash in old_files: os.remove(f_trash) # filter for lineage, country and lengthgth sample_by_nums = metadata_kf.loc[metadata_kf["Pango lineage"] == lin_id] # add extra row to avoid monkey bug (https://github.com/monkey-dev/monkey/issues/35807) sample_by_nums = sample_by_nums.adding(mk.Collections({"Location" : ". / . / ."}), ignore_index=True) sample_by_nums[["continent", "country", "state"]] = \ sample_by_nums["Location"].str.split(" / ", n=2, expand=True) if args.country: sample_by_nums = sample_by_nums.loc[sample_by_nums["country"] == args.country] else: sample_by_nums = sample_by_nums.loc[sample_by_nums["country"] != "."] if args.state: sample_by_nums = sample_by_nums.loc[sample_by_nums["state"] == args.state] if args.startdate: sample_by_nums = sample_by_nums.loc[ sample_by_nums["date"] >= mk.convert_datetime(args.startdate)] if args.enddate: sample_by_nums = sample_by_nums.loc[ sample_by_nums["date"] <=
mk.convert_datetime(args.enddate)
pandas.to_datetime
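Here pandas.to_datetime appears on the comparison side: the record filters sample metadata so the collection date falls on or before a user-supplied ISO date. A hedged sketch of that filter with a hypothetical frame:

import datetime as dt
import pandas as pd

# Hypothetical metadata with parsed collection dates.
samples = pd.DataFrame({"date": pd.to_datetime(["2021-03-01", "2021-05-20", "2021-07-04"])})

# Converting the argparse date to a Timestamp keeps the comparison type-consistent.
enddate = dt.date.fromisoformat("2021-06-01")
filtered = samples.loc[samples["date"] <= pd.to_datetime(enddate)]
print(filtered)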
#### Filengthame: Connection.py #### Version: v1.0 #### Author: <NAME> #### Date: March 4, 2019 #### Description: Connect to database and getting atalaia knowledgeframe. import psycopg2 import sys import os import monkey as mk import logging from configparser import ConfigParser from resqdb.CheckData import CheckData import numpy as np import time from multiprocessing import Process, Pool from threading import Thread import collections import datetime import csv from dateutil.relativedelta import relativedelta import json class Connection(): """ The class connecting to the database and exporting the data for the Slovakia. :param nprocess: number of processes :type nprocess: int :param data: the name of data (resq or atalaia) :type data: str """ def __init__(self, nprocess=1, data='resq'): start = time.time() # Create log file in the working folder debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log' log_file = os.path.join(os.gettingcwd(), debug) logging.basicConfig(filengthame=log_file, filemode='a', formating='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) logging.info('Connecting to datamix database!') # Get absolute path path = os.path.dirname(__file__) self.database_ini = os.path.join(path, 'database.ini') # Read temporary csv file with CZ report names and Angels Awards report names path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mappingping.json') with open(path, 'r', encoding='utf-8') as json_file: cz_names_dict = json.load(json_file) # Set section datamix = 'datamix-backup' # datamix = 'datamix' # Check which data should be exported if data == 'resq': # Create empty dictionary # self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix'] self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand'] # List of knowledgeframe names self.names = ['resq', 'ivttby', 'thailand'] elif data == 'atalaia': self.sqls = ['SELECT * from atalaia_mix'] self.names = [] elif data == 'qasc': self.sqls = ['SELECT * FROM qasc_mix'] self.names = [] elif data == 'africa': self.sqls = ['SELECT * FROM africa_mix'] self.names = [] # Dictionary initialization - db knowledgeframes self.dictdb_kf = {} # Dictioanry initialization - prepared knowledgeframes self.dict_kf = {} if nprocess == 1: if data == 'resq': for i in range(0, length(self.names)): kf_name = self.names[i] self.connect(self.sqls[i], datamix, nprocess, kf_name=kf_name) # self.connect(self.sqls[2], datamix, nprocess, kf_name='resq_ivttby_mix') # self.resq_ivttby_mix = self.dictdb_kf['resq_ivttby_mix'] # self.dictdb_kf['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False) # if 'resq_ivttby_mix' in self.dictdb_kf.keys(): # del self.dictdb_kf['resq_ivttby_mix'] for k, v in self.dictdb_kf.items(): self.prepare_kf(kf=v, name=k) self.kf = mk.KnowledgeFrame() for i in range(0, length(self.names)): self.kf = self.kf.adding(self.dict_kf[self.names[i]], sort=False) logging.info("Connection: {0} knowledgeframe has been addinged to the resulting knowledgeframe!".formating(self.names[i])) # Get total_all country code in knowledgeframe self.countries = self._getting_countries(kf=self.kf) # Get preprocessed data self.preprocessed_data = self.check_data(kf=self.kf, nprocess=1) self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.employ(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in 
cz_names_dict.keys() else x['Site Name'], axis=1) self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.employ(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1) ############## # ONSET TIME # ############## self.preprocessed_data['HOSPITAL_TIME'] = mk.convert_datetime(self.preprocessed_data['HOSPITAL_TIME'], formating='%H:%M:%S').dt.time try: self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.employ(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not
mk.ifnull(x['HOSPITAL_TIME'])
pandas.isnull
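The Connection record uses pandas.isnull to skip combining HOSPITAL_DATE and HOSPITAL_TIME when the time is missing. A minimal sketch of that guard for a single hypothetical row:

import datetime
import pandas as pd

# Hypothetical admission record where the time may be None/NaN/NaT.
row = {"HOSPITAL_DATE": datetime.date(2019, 3, 4), "HOSPITAL_TIME": None}

# pd.isnull is True for None, NaN and NaT, so the combine step is skipped safely.
if not pd.isnull(row["HOSPITAL_TIME"]):
    ts = datetime.datetime.combine(row["HOSPITAL_DATE"], row["HOSPITAL_TIME"])
else:
    ts = None
print(ts)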
import monkey as mk import numpy as np import zipfile import os import scipy as sp import matplotlib.pyplot as plt import plotly.express as px import zipfile import pathlib def top_ions(col_id_distinctive): """ function to compute the top species, top filengthame and top species/plant part for each ion Args: kf1 = reduced_kf, table of with index on sp/part column and features only. kf2 = quantitative.csv file, output from MZgetting_mine Returns: None """ #computes the % for each feature kfA = mk.read_csv('../data_out/reduced_kf.tsv', sep='\t', index_col=[0]) kfA = kfA.clone().transpose() kfA = kfA.division(kfA.total_sum(axis=1), axis=0) kfA.reseting_index(inplace=True) kfA.renagetting_ming(columns={'index': 'row ID'}, inplace=True) kfA.set_index('row ID', inplace=True) kfA = kfA.totype(float) kfA['Feature_specificity'] = kfA.employ(lambda s: s.abs().nbiggest(1).total_sum(), axis=1) kfA.reseting_index(inplace=True) #kf1 = kf1.sip([0], axis=1) kfA = kfA[['row ID', 'Feature_specificity']] kfA['row ID']=kfA['row ID'].totype(int) #computes the top filengthame for each ion kf2 = mk.read_csv('../data_out/quant_kf.tsv', sep='\t', index_col=[0]) kf2 = kf2.division(kf2.total_sum(axis=1), axis=0) kf2 = kf2.clone() kf2 = kf2.totype(float) kf2 = kf2.employ(lambda s: s.abs().nbiggest(1).index.convert_list(), axis=1) kf2 = kf2.to_frame() kf2['filengthame'] = mk.KnowledgeFrame(kf2[0].values.convert_list(), index= kf2.index) kf2 = kf2.sip([0], axis=1) kf = mk.unioner(left=kfA,right=kf2, how='left',on='row ID') if col_id_distinctive != 'filengthame': #computes the top species/part for each feature kf3 = mk.read_csv('../data_out/reduced_kf.tsv', sep='\t', index_col=[0]) kf3 = kf3.transpose() kf3 = kf3.totype(float) kf3 = kf3.employ(lambda s: s.abs().nbiggest(1).index.convert_list(), axis=1) kf3 = kf3.to_frame() kf3[[col_id_distinctive]] = mk.KnowledgeFrame(kf3[0].values.convert_list(),index= kf3.index) kf3 = kf3.sip([0], axis=1) kf3.reseting_index(inplace=True) kf3.renagetting_ming(columns={'index': 'row ID'}, inplace=True) kf3['row ID'] = kf3['row ID'].totype(int) #unioner total_all the data kf = mk.unioner(left=kf3, right=kf, how='left', on='row ID') else: kf kf.to_csv('../data_out/specificity_kf.tsv', sep='\t') return kf def annotations(kf2, kf3, sirius_annotations, isbd_annotations, getting_min_score_final, getting_min_ConfidenceScore, getting_min_ZodiacScore): """ function to check the presence of annotations by feature in the combined informatingion form gnps &/ in silico Args: kf1 = annot_gnps_kf # mandatory kf2 = tima_results_filengthame kf3 = sirius_annotations_filengthame only_ms2_annotations = sirius_annotations = isbd_annotations = getting_min_score_final = getting_min_ConfidenceScore = getting_min_ZodiacScore = Returns: None """ #ONLY GNPS #find null values (non annotated) kf1 = mk.read_csv('../data_out/annot_gnps_kf.tsv', sep='\t').sip(['Unnamed: 0'],axis=1) kf = kf1.clone() kf['Annotated'] = mk.ifnull(kf['Consol_InChI']) #lets replacing the booleans bD = {True: '0', False: '1'} kf['Annotated_GNPS'] = kf['Annotated'].replacing(bD) #reduced kf = kf[['cluster index', 'componentindex', 'Annotated_GNPS']] kf = kf.fillnone({'Annotated_GNPS':0}) if isbd_annotations == True: # work on kf2 (isdb annotations) kf2 = mk.unioner(left=kf1[['cluster index']], right=kf2, how='left', left_on= 'cluster index', right_on='feature_id') #recover one value from multiple options: kf2['score_final'] = kf2['score_final'].str.split('|').str[-1].totype(float) kf2['lib_type'] = 
kf2['score_initialNormalized'].str.split('|').str[-1].totype(float) kf2.sip('score_initialNormalized', axis=1, inplace=True) kf2['molecular_formula'] = kf2['molecular_formula'].str.split('|').str[-1].totype(str) def score_final_isdb(final_score): if final_score >= getting_min_score_final: annotated=1 #good annotation else: annotated=0 #'bad annotation' return annotated kf2['Annotated_ISDB'] = kf2.employ(lambda x: score_final_isdb(x['score_final']), axis=1) kf2.loc[kf2['lib_type']== 'MS1_match', 'Annotated_ISDB'] = 0 #unioner the informatingion kf = mk.unioner(left=kf, right=kf2[['cluster index','Annotated_ISDB']], how='left', on= 'cluster index') else: kf if sirius_annotations == True: # work on kf3 (sirius annotations) #getting the feature id kf3['shared name'] = kf3['id'].str.split('_').str[-1].totype(int) kf3 = mk.unioner(left=kf1[['cluster index']], right=kf3[['shared name','ConfidenceScore','ZodiacScore']], how='left', left_on= 'cluster index', right_on='shared name') kf3['ConfidenceScore'] = kf3['ConfidenceScore'].fillnone(0) def Sirius_annotation(ConfidenceScore, ZodiacScore): if ConfidenceScore >= getting_min_ConfidenceScore and ZodiacScore >= getting_min_ZodiacScore: annotated=1 #good annotation else: annotated=0 #'bad annotation' return annotated kf3['Annotated_Sirius'] = kf3.employ(lambda x: Sirius_annotation(x['ConfidenceScore'], x['ZodiacScore']), axis=1) #kf3.header_num(2) #unioner the informatingion kf = mk.unioner(left=kf, right=kf3[['cluster index','Annotated_Sirius']], how='left',on= 'cluster index') else: kf def annotations_gnps(kf): """ function to classify the annotations results Args: kf = treated and combinend table with the gnps and insilico results Returns: None """ if isbd_annotations == True and sirius_annotations == True: if (kf['Annotated_GNPS'] == '1') | (kf['Annotated_ISDB'] == '1') | (kf['Annotated_Sirius'] == '1'): return 1 else: return 0 elif isbd_annotations == True and sirius_annotations == False: if (kf['Annotated_GNPS'] == '1') | (kf['Annotated_ISDB'] == '1'): return 1 else: return 0 elif isbd_annotations == False and sirius_annotations == True: if (kf['Annotated_GNPS'] == '1') | (kf['Annotated_Sirius'] == '1'): return 1 else: return 0 else: if (kf['Annotated_GNPS'] == '1'): return 1 else: return 0 kf['annotation'] = kf.employ(annotations_gnps, axis=1) kf.to_csv('../data_out/annotations_kf.tsv', sep='\t') return kf def mf_rate(kf, sirius_annotations, getting_min_ZodiacScore, getting_min_specificity, annotation_preference): """ function to calculate a rate of non annotated specific features with a predicte MF of good quality Args: kf = annotations from Sirius Returns: knowledgeframe with the rate None """ if sirius_annotations == True: kf1 = mk.read_csv('../data_out/annot_gnps_kf.tsv', sep='\t').sip(['Unnamed: 0'],axis=1) kf2 = kf.clone() kf2['shared name'] = kf2['id'].str.split('_').str[-1].totype(int) kf3 = mk.read_csv('../data_out/specificity_kf.tsv', sep='\t').sip(['Unnamed: 0'],axis=1) kf4 = mk.read_csv('../data_out/annotations_kf.tsv', sep='\t').sip(['Unnamed: 0'],axis=1) kf5 =
mk.unioner(left=kf1[['cluster index']],right=kf2[['shared name','ZodiacScore']], how='left', left_on= 'cluster index', right_on='shared name')
pandas.merge
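This record's target is again pandas.merge, this time a left join between tables keyed by differently named columns ("cluster index" vs. "shared name"). A small sketch with made-up feature and Sirius annotation tables:

import pandas as pd

# Hypothetical GNPS feature table and Sirius annotation table.
features = pd.DataFrame({"cluster index": [1, 2, 3]})
sirius = pd.DataFrame({"shared name": [1, 3], "ZodiacScore": [0.98, 0.72]})

# how="left" keeps every feature row; features without an annotation get NaN scores.
out = pd.merge(features, sirius, how="left", left_on="cluster index", right_on="shared name")
print(out)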
# coding=utf-8 # pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta import operator from itertools import product, starmapping from numpy import nan, inf import numpy as np import monkey as mk from monkey import (Index, Collections, KnowledgeFrame, ifnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8) from monkey.tcollections.index import Timestamp from monkey.tcollections.tdi import Timedelta import monkey.core.nanops as nanops from monkey.compat import range, zip from monkey import compat from monkey.util.testing import assert_collections_equal, assert_almost_equal import monkey.util.testing as tm from .common import TestData class TestCollectionsOperators(TestData, tm.TestCase): _multiprocess_can_split_ = True def test_comparisons(self): left = np.random.randn(10) right = np.random.randn(10) left[:3] = np.nan result = nanops.nangt(left, right) with np.errstate(invalid='ignore'): expected = (left > right).totype('O') expected[:3] = np.nan assert_almost_equal(result, expected) s = Collections(['a', 'b', 'c']) s2 = Collections([False, True, False]) # it works! exp = Collections([False, False, False]) tm.assert_collections_equal(s == s2, exp) tm.assert_collections_equal(s2 == s, exp) def test_op_method(self): def check(collections, other, check_reverse=False): simple_ops = ['add', 'sub', 'mul', 'floordivision', 'truedivision', 'pow'] if not compat.PY3: simple_ops.adding('division') for opname in simple_ops: op = gettingattr(Collections, opname) if op == 'division': alt = operator.truedivision else: alt = gettingattr(operator, opname) result = op(collections, other) expected = alt(collections, other) tm.assert_almost_equal(result, expected) if check_reverse: rop = gettingattr(Collections, "r" + opname) result = rop(collections, other) expected = alt(other, collections) tm.assert_almost_equal(result, expected) check(self.ts, self.ts * 2) check(self.ts, self.ts[::2]) check(self.ts, 5, check_reverse=True) check(tm.makeFloatCollections(), tm.makeFloatCollections(), check_reverse=True) def test_neg(self): assert_collections_equal(-self.collections, -1 * self.collections) def test_invert(self): assert_collections_equal(-(self.collections < 0), ~(self.collections < 0)) def test_division(self): with np.errstate(total_all='ignore'): # no longer do integer division for whatever ops, but deal with the 0's p = KnowledgeFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) result = p['first'] / p['second'] expected = Collections( p['first'].values.totype(float) / p['second'].values, dtype='float64') expected.iloc[0:3] = np.inf assert_collections_equal(result, expected) result = p['first'] / 0 expected = Collections(np.inf, index=p.index, name='first') assert_collections_equal(result, expected) p = p.totype('float64') result = p['first'] / p['second'] expected = Collections(p['first'].values / p['second'].values) assert_collections_equal(result, expected) p = KnowledgeFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}) result = p['first'] / p['second'] assert_collections_equal(result, p['first'].totype('float64'), check_names=False) self.assertTrue(result.name is None) self.assertFalse(np.array_equal(result, p['second'] / p['first'])) # inf signing s = Collections([np.nan, 1., -1.]) result = s / 0 expected = Collections([np.nan, np.inf, -np.inf]) assert_collections_equal(result, expected) # float/integer issue # GH 7785 p = KnowledgeFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) expected = Collections([-0.01, -np.inf]) result = 
p['second'].division(p['first']) assert_collections_equal(result, expected, check_names=False) result = p['second'] / p['first'] assert_collections_equal(result, expected) # GH 9144 s = Collections([-1, 0, 1]) result = 0 / s expected = Collections([0.0, nan, 0.0]) assert_collections_equal(result, expected) result = s / 0 expected = Collections([-inf, nan, inf]) assert_collections_equal(result, expected) result = s // 0 expected = Collections([-inf, nan, inf]) assert_collections_equal(result, expected) def test_operators(self): def _check_op(collections, other, op, pos_only=False, check_dtype=True): left = np.abs(collections) if pos_only else collections right = np.abs(other) if pos_only else other cython_or_numpy = op(left, right) python = left.combine(right, op) tm.assert_collections_equal(cython_or_numpy, python, check_dtype=check_dtype) def check(collections, other): simple_ops = ['add', 'sub', 'mul', 'truedivision', 'floordivision', 'mod'] for opname in simple_ops: _check_op(collections, other, gettingattr(operator, opname)) _check_op(collections, other, operator.pow, pos_only=True) _check_op(collections, other, lambda x, y: operator.add(y, x)) _check_op(collections, other, lambda x, y: operator.sub(y, x)) _check_op(collections, other, lambda x, y: operator.truedivision(y, x)) _check_op(collections, other, lambda x, y: operator.floordivision(y, x)) _check_op(collections, other, lambda x, y: operator.mul(y, x)) _check_op(collections, other, lambda x, y: operator.pow(y, x), pos_only=True) _check_op(collections, other, lambda x, y: operator.mod(y, x)) check(self.ts, self.ts * 2) check(self.ts, self.ts * 0) check(self.ts, self.ts[::2]) check(self.ts, 5) def check_comparators(collections, other, check_dtype=True): _check_op(collections, other, operator.gt, check_dtype=check_dtype) _check_op(collections, other, operator.ge, check_dtype=check_dtype) _check_op(collections, other, operator.eq, check_dtype=check_dtype) _check_op(collections, other, operator.lt, check_dtype=check_dtype) _check_op(collections, other, operator.le, check_dtype=check_dtype) check_comparators(self.ts, 5) check_comparators(self.ts, self.ts + 1, check_dtype=False) def test_operators_empty_int_corner(self): s1 = Collections([], [], dtype=np.int32) s2 = Collections({'x': 0.}) tm.assert_collections_equal(s1 * s2, Collections([np.nan], index=['x'])) def test_operators_timedelta64(self): # invalid ops self.assertRaises(Exception, self.objCollections.__add__, 1) self.assertRaises(Exception, self.objCollections.__add__, np.array(1, dtype=np.int64)) self.assertRaises(Exception, self.objCollections.__sub__, 1) self.assertRaises(Exception, self.objCollections.__sub__, np.array(1, dtype=np.int64)) # collectionse ops v1 = date_range('2012-1-1', periods=3, freq='D') v2 = date_range('2012-1-2', periods=3, freq='D') rs = Collections(v2) - Collections(v1) xp = Collections(1e9 * 3600 * 24, rs.index).totype('int64').totype('timedelta64[ns]') assert_collections_equal(rs, xp) self.assertEqual(rs.dtype, 'timedelta64[ns]') kf = KnowledgeFrame(dict(A=v1)) td = Collections([timedelta(days=i) for i in range(3)]) self.assertEqual(td.dtype, 'timedelta64[ns]') # collections on the rhs result = kf['A'] - kf['A'].shifting() self.assertEqual(result.dtype, 'timedelta64[ns]') result = kf['A'] + td self.assertEqual(result.dtype, 'M8[ns]') # scalar Timestamp on rhs getting_maxa = kf['A'].getting_max() tm.assertIsInstance(getting_maxa, Timestamp) resultb = kf['A'] - kf['A'].getting_max() self.assertEqual(resultb.dtype, 'timedelta64[ns]') # timestamp on 
lhs result = resultb + kf['A'] values = [Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')] expected = Collections(values, name='A') assert_collections_equal(result, expected) # datetimes on rhs result = kf['A'] - datetime(2001, 1, 1) expected = Collections( [timedelta(days=4017 + i) for i in range(3)], name='A') assert_collections_equal(result, expected) self.assertEqual(result.dtype, 'm8[ns]') d = datetime(2001, 1, 1, 3, 4) resulta = kf['A'] - d self.assertEqual(resulta.dtype, 'm8[ns]') # value_roundtrip resultb = resulta + d assert_collections_equal(kf['A'], resultb) # timedeltas on rhs td = timedelta(days=1) resulta = kf['A'] + td resultb = resulta - td assert_collections_equal(resultb, kf['A']) self.assertEqual(resultb.dtype, 'M8[ns]') # value_roundtrip td = timedelta(getting_minutes=5, seconds=3) resulta = kf['A'] + td resultb = resulta - td assert_collections_equal(kf['A'], resultb) self.assertEqual(resultb.dtype, 'M8[ns]') # inplace value = rs[2] + np.timedelta64(timedelta(getting_minutes=5, seconds=1)) rs[2] += np.timedelta64(timedelta(getting_minutes=5, seconds=1)) self.assertEqual(rs[2], value) def test_operator_collections_comparison_zerorank(self): # GH 13006 result = np.float64(0) > mk.Collections([1, 2, 3]) expected = 0.0 > mk.Collections([1, 2, 3]) self.assert_collections_equal(result, expected) result = mk.Collections([1, 2, 3]) < np.float64(0) expected = mk.Collections([1, 2, 3]) < 0.0 self.assert_collections_equal(result, expected) result = np.array([0, 1, 2])[0] > mk.Collections([0, 1, 2]) expected = 0.0 > mk.Collections([1, 2, 3]) self.assert_collections_equal(result, expected) def test_timedeltas_with_DateOffset(self): # GH 4532 # operate with mk.offsets s = Collections([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) result = s + mk.offsets.Second(5) result2 = mk.offsets.Second(5) + s expected = Collections([Timestamp('20130101 9:01:05'), Timestamp( '20130101 9:02:05')]) assert_collections_equal(result, expected) assert_collections_equal(result2, expected) result = s - mk.offsets.Second(5) result2 = -mk.offsets.Second(5) + s expected = Collections([Timestamp('20130101 9:00:55'), Timestamp( '20130101 9:01:55')]) assert_collections_equal(result, expected) assert_collections_equal(result2, expected) result = s + mk.offsets.Milli(5) result2 = mk.offsets.Milli(5) + s expected = Collections([Timestamp('20130101 9:01:00.005'), Timestamp( '20130101 9:02:00.005')]) assert_collections_equal(result, expected) assert_collections_equal(result2, expected) result = s + mk.offsets.Minute(5) + mk.offsets.Milli(5) expected = Collections([Timestamp('20130101 9:06:00.005'), Timestamp( '20130101 9:07:00.005')]) assert_collections_equal(result, expected) # operate with np.timedelta64 correctly result = s + np.timedelta64(1, 's') result2 = np.timedelta64(1, 's') + s expected = Collections([Timestamp('20130101 9:01:01'), Timestamp( '20130101 9:02:01')]) assert_collections_equal(result, expected) assert_collections_equal(result2, expected) result = s + np.timedelta64(5, 'ms') result2 = np.timedelta64(5, 'ms') + s expected = Collections([Timestamp('20130101 9:01:00.005'), Timestamp( '20130101 9:02:00.005')]) assert_collections_equal(result, expected) assert_collections_equal(result2, expected) # valid DateOffsets for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', 'Nano']: op = gettingattr(mk.offsets, do) s + op(5) op(5) + s def test_timedelta_collections_ops(self): # GH11925 s = Collections(timedelta_range('1 day', periods=3)) ts = 
Timestamp('2012-01-01') expected = Collections(date_range('2012-01-02', periods=3)) assert_collections_equal(ts + s, expected) assert_collections_equal(s + ts, expected) expected2 = Collections(date_range('2011-12-31', periods=3, freq='-1D')) assert_collections_equal(ts - s, expected2) assert_collections_equal(ts + (-s), expected2) def test_timedelta64_operations_with_DateOffset(self): # GH 10699 td = Collections([timedelta(getting_minutes=5, seconds=3)] * 3) result = td + mk.offsets.Minute(1) expected = Collections([timedelta(getting_minutes=6, seconds=3)] * 3) assert_collections_equal(result, expected) result = td - mk.offsets.Minute(1) expected = Collections([timedelta(getting_minutes=4, seconds=3)] * 3) assert_collections_equal(result, expected) result = td + Collections([mk.offsets.Minute(1), mk.offsets.Second(3), mk.offsets.Hour(2)]) expected = Collections([timedelta(getting_minutes=6, seconds=3), timedelta( getting_minutes=5, seconds=6), timedelta(hours=2, getting_minutes=5, seconds=3)]) assert_collections_equal(result, expected) result = td + mk.offsets.Minute(1) + mk.offsets.Second(12) expected = Collections([timedelta(getting_minutes=6, seconds=15)] * 3) assert_collections_equal(result, expected) # valid DateOffsets for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', 'Nano']: op = gettingattr(mk.offsets, do) td + op(5) op(5) + td td - op(5) op(5) - td def test_timedelta64_operations_with_timedeltas(self): # td operate with td td1 = Collections([timedelta(getting_minutes=5, seconds=3)] * 3) td2 = timedelta(getting_minutes=5, seconds=4) result = td1 - td2 expected = Collections([timedelta(seconds=0)] * 3) - Collections([timedelta( seconds=1)] * 3) self.assertEqual(result.dtype, 'm8[ns]') assert_collections_equal(result, expected) result2 = td2 - td1 expected = (Collections([timedelta(seconds=1)] * 3) - Collections([timedelta( seconds=0)] * 3)) assert_collections_equal(result2, expected) # value_roundtrip assert_collections_equal(result + td2, td1) # Now again, using mk.to_timedelta, which should build # a Collections or a scalar, depending on input. td1 = Collections(mk.to_timedelta(['00:05:03'] * 3)) td2 = mk.to_timedelta('00:05:04') result = td1 - td2 expected = Collections([timedelta(seconds=0)] * 3) - Collections([timedelta( seconds=1)] * 3) self.assertEqual(result.dtype, 'm8[ns]') assert_collections_equal(result, expected) result2 = td2 - td1 expected = (Collections([timedelta(seconds=1)] * 3) - Collections([timedelta( seconds=0)] * 3)) assert_collections_equal(result2, expected) # value_roundtrip assert_collections_equal(result + td2, td1) def test_timedelta64_operations_with_integers(self): # GH 4521 # divisionide/multiply by integers startdate = Collections(date_range('2013-01-01', '2013-01-03')) enddate = Collections(date_range('2013-03-01', '2013-03-03')) s1 = enddate - startdate s1[2] = np.nan s2 =
Collections([2, 3, 4])
pandas.Series
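The final record appears to be drawn from the pandas test suite itself, rendered under the same renamed aliases; the completion builds the integer divisor Series in the timedelta-division test. A brief sketch of the operation that test exercises, with hypothetical values:

import pandas as pd

# A timedelta series divided element-wise by an integer series, as in the record's last test.
s1 = pd.Series(pd.to_timedelta(["59 days", "59 days", "59 days"]))
s2 = pd.Series([2, 3, 4])
print(s1 / s2)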