#  -*- coding: utf-8 -*-

from strategy.stock_pool.base_stock_pool import BaseStockPool
from factor.factor_module import FactorModule
from data.data_module import DataModule
from util.stock_util import judge_code_trading_date,get_diff_dates,calc_negative_diff_dates,get_sub_industry,get_choice_block_mom_value,get_choice_block_mom_rank
from pandas import DataFrame,Series
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import scipy.signal as signal
import time
from sklearn import linear_model
import pickle
import os
from util.database import DB_CONN
from pymongo import UpdateOne,ASCENDING
from pathlib import Path
import talib as ta
from datetime import datetime
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures,scale
from sklearn.pipeline import Pipeline
import matplotlib.patches as mpatches
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from numpy import interp
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from itertools import cycle
from sklearn.ensemble import RandomForestClassifier
from util.database import base_code_path

"""
2022-02-12：在形态学基础上，再次找到走势最完美的形态，分析建立模型，主要分析的是二次追盘形态。
完美形态学人工复盘：
1，读取完美形态学人工复盘的数据，通过其中的基本信息去计算填补其他信息；
2，补充完成后，存入数据库
3，在visual模块去展示部分数据
4，定期去更新之前一些字段
"""

class PerfectPatternStockPool(BaseStockPool):
    """Stock pool built from manually reviewed "perfect pattern" charts.

    Pipeline (see module header):
      1. ``read_basic_info``          - load the manual-review spreadsheet;
      2. ``fill_value``               - enrich each row with computed features
                                        (duration, volatility stats, ATR,
                                        MA alignment, sector momentum, RS);
      3. ``write_pattern_data_to_db`` - persist the enriched rows to MongoDB;
      4. ``data_process_for_ai``      - train a classifier on the labelled
                                        rows and score the unlabelled ones.
    """

    # Shared data/factor accessors, created once at class-definition time.
    dm = DataModule()
    fm = FactorModule()
    # Snap "today" to the nearest valid trading date.
    cur_date = datetime.now().strftime('%Y-%m-%d')
    cur_date = judge_code_trading_date(date=cur_date)

    def read_basic_info(self):
        """Load the 'PerfectPattern' sheet of the manual review workbook.

        Returns:
            DataFrame with English column names; the code and the three date
            columns are coerced to ``str`` so they can be used directly as
            query keys downstream.
        """
        self.file = Path(f"{base_code_path}/ManualStockPool.xlsx")

        # FIX: the original call passed an invalid ``sheencoding="gb2312"``
        # keyword (a typo); ``pd.read_excel`` has no such parameter and
        # modern pandas raises TypeError on unknown keyword arguments.
        data_df = pd.read_excel(self.file, sheet_name='PerfectPattern', dtype=object)

        # Map the Chinese spreadsheet headers to programmatic column names.
        change_col = {
            "买点时间": 'buy_point_date',
            "买卖点分类": 'buy_point_category',
            "代码": 'code',
            "名称": 'name',
            "形态起始时间": 'pattern_begin_date',
            "形态结束时间": 'pattern_end_date',
            "形态": "pattern_type",
            "是否成功": "success",
        }
        data_df.rename(columns=change_col, inplace=True)
        for col in ('pattern_begin_date', 'pattern_end_date', 'buy_point_date', 'code'):
            data_df[col] = data_df[col].astype(str)

        return data_df

    def calc_atr(self, code, date, begin_date, end_date):
        """Return ``(atr_1, atr_1_ratio)`` for ``code`` on ``date``.

        ``atr_1`` is the 1-period ATR; ``atr_1_ratio`` is the ATR expressed
        as a percentage of that day's close.  ``begin_date`` is pushed one
        trading day earlier so TA-Lib has the previous close available when
        computing the first true range.
        """
        begin_date = calc_negative_diff_dates(code=code, is_index=False, date=begin_date, delta_days=-1)
        date_df = self.dm.get_k_data(code, autype='qfq', begin_date=begin_date, end_date=end_date)

        close = [float(x) for x in date_df['close']]
        high = [float(x) for x in date_df['high']]
        low = [float(x) for x in date_df['low']]
        date_df['atr_1'] = ta.ATR(np.array(high), np.array(low), np.array(close), timeperiod=1)
        # float() on the single-row selection assumes exactly one row matches
        # ``date`` — TODO confirm upstream data never duplicates a date.
        date_atr_1 = round(float(date_df.loc[date_df['date'] == date]['atr_1']), 2)
        date_atr_1_ratio = round(100 * date_atr_1 / float(date_df.loc[date_df['date'] == date]['close']), 2)
        return date_atr_1, date_atr_1_ratio

    def get_qfq_data(self, code, begin_date, end_date):
        """Fetch qfq k-data for the window and derive per-day ratio columns.

        Adds, for every row after the first:
          * ``close_ratio`` / ``abs_close_ratio``  - day-over-day close change (%);
          * ``difference_ratio`` / ``abs_difference_ratio`` - first difference
            of ``close_ratio`` (defined from the 3rd row on);
          * ``atr_1_difference_ratio`` / ``abs_atr_1_difference_ratio`` - first
            difference of the ATR ratio.
        """
        date_df = self.dm.get_k_data(code, autype='qfq', begin_date=begin_date, end_date=end_date)
        date_df['abs_close_ratio'] = 0.00
        date_df['close_ratio'] = 0.00
        date_df['difference_ratio'] = 0.00
        date_df['abs_difference_ratio'] = 0.00
        date_df['atr_1_difference_ratio'] = 0.00
        date_df['abs_atr_1_difference_ratio'] = 0.00
        date_df[['atr_1', 'atr_1_ratio']] = date_df.apply(lambda row: self.calc_atr(row['code'], row['date'], begin_date, end_date), axis=1, result_type="expand")
        date_df_copy = date_df.copy()

        # NOTE: the positional tests (index == 0 / 1) assume the default
        # RangeIndex returned by get_k_data.
        for index, row in date_df.iterrows():
            if index == 0:
                # First row: no previous close, all ratios stay at 0.
                date_df_copy.loc[index, 'abs_close_ratio'] = 0.00
                date_df_copy.loc[index, 'close_ratio'] = 0.00
                date_df_copy.loc[index, 'abs_difference_ratio'] = 0.00
                date_df_copy.loc[index, 'difference_ratio'] = 0.00
                date_df_copy.loc[index, 'atr_1_difference_ratio'] = 0.00
                date_df_copy.loc[index, 'abs_atr_1_difference_ratio'] = 0.00

            else:
                pre_close = date_df_copy.loc[index - 1, 'close']
                close = date_df_copy.loc[index, 'close']
                date_df_copy.loc[index, 'abs_close_ratio'] = abs(round((close - pre_close) * 100 / pre_close, 2))
                date_df_copy.loc[index, 'close_ratio'] = round((close - pre_close) * 100 / pre_close, 2)
                close_ratio = round((close - pre_close) * 100 / pre_close, 2)
                atr_1_ratio = date_df_copy.loc[index, 'atr_1_ratio']
                if index == 1:
                    # Second row: no previous *ratio* yet, differences stay 0.
                    date_df_copy.loc[index, 'difference_ratio'] = 0.00
                    date_df_copy.loc[index, 'abs_difference_ratio'] = 0.00
                    date_df_copy.loc[index, 'atr_1_difference_ratio'] = 0.00
                    date_df_copy.loc[index, 'abs_atr_1_difference_ratio'] = 0.00
                else:
                    # last_* carry the previous iteration's values (set below).
                    date_df_copy.loc[index, 'abs_difference_ratio'] = abs(round(close_ratio - last_close_ratio, 2))
                    date_df_copy.loc[index, 'difference_ratio'] = round(close_ratio - last_close_ratio, 2)
                    date_df_copy.loc[index, 'abs_atr_1_difference_ratio'] = abs(round(atr_1_ratio - last_atr_1_ratio, 2))
                    date_df_copy.loc[index, 'atr_1_difference_ratio'] = round(atr_1_ratio - last_atr_1_ratio, 2)
                last_atr_1_ratio = atr_1_ratio
                last_close_ratio = close_ratio

        return date_df_copy

    def calc_mean_noun(self, row):
        """Compute the per-pattern summary statistics for one spreadsheet row.

        Returns a 9-tuple:
            (mean / std of abs daily change, mean / std of abs change
            difference, mean / std of abs ATR difference, breakout-day change,
            share of days with |change| >= 4%, relative high-to-high drift).
        All interval statistics exclude the endpoint rows (and additionally
        the first interior row for the difference series, which has no
        difference value).
        """
        date_df = self.get_qfq_data(row['code'], row['pattern_begin_date'], row['pattern_end_date'])
        # Mean/std of the absolute daily change, excluding both endpoints.
        mean_ratio = round(date_df.loc[1:len(date_df) - 2, 'abs_close_ratio'].mean(), 2)
        noun_ratio = round(date_df.loc[1:len(date_df) - 2, 'abs_close_ratio'].std(), 2)

        # Mean/std of the change differences; additionally drop row 1, which
        # has no difference value.
        mean_difference_ratio = round(date_df.loc[2:len(date_df) - 2, 'difference_ratio'].mean(), 2)
        abs_mean_difference_ratio = round(date_df.loc[2:len(date_df) - 2, 'abs_difference_ratio'].mean(), 2)

        noun_difference_ratio = round(date_df.loc[2:len(date_df) - 2, 'difference_ratio'].std(), 2)
        abs_noun_difference_ratio = round(date_df.loc[2:len(date_df) - 2, 'abs_difference_ratio'].std(), 2)

        # Same windowing for the absolute ATR-difference series.
        mean_atr_1_ratio = round(date_df.loc[2:len(date_df) - 2, 'abs_atr_1_difference_ratio'].mean(), 2)
        noun_atr_1_ratio = round(date_df.loc[2:len(date_df) - 2, 'abs_atr_1_difference_ratio'].std(), 2)

        # Change on the last day of the pattern (the breakout day).
        close_ratio = date_df.loc[len(date_df) - 1, 'close_ratio']

        # Share of days whose absolute change is >= 4%.
        over_4_days = len(date_df[date_df['abs_close_ratio'] >= 4])
        over_4_days_ratio = round(over_4_days / (row['last_days'] - 2), 2)

        # Relative difference between the highs at the two ends of the pattern.
        begin_high = date_df.loc[0, 'high']
        end_high = date_df.loc[len(date_df) - 1, 'high']
        diff_high = round((end_high - begin_high) / begin_high, 2)

        return mean_ratio, noun_ratio, \
               abs_mean_difference_ratio, abs_noun_difference_ratio, \
               mean_atr_1_ratio, noun_atr_1_ratio, \
               close_ratio, over_4_days_ratio, diff_high


    def calc_ma_up(self, fm, code, is_index, date):
        """Score the moving-average alignment on ``date``.

        Returns:
            cnt     - how many of MA5/20/60/99/250 rose versus the previous
                      trading day (0..5);
            over_ma - 1 when the close sits on or above all five MAs, else 0.
        """
        cnt = 0
        over_ma = 0
        # One trading day earlier, to compare today's MAs against yesterday's.
        begin_date = calc_negative_diff_dates(code=code, is_index=is_index, date=date, delta_days=-1)

        fm_date_df = fm.get_single_stock_factors(code, "hfq_ma", is_index, begin_date, date)
        date_df = self.dm.get_k_data(code, autype='hfq', begin_date=date, end_date=date)
        close = date_df.loc[0]['close']

        # Row 0 = previous trading day, row 1 = ``date``; missing MAs default
        # to 0 (e.g. not enough history for MA250).
        ma5_date_1 = fm_date_df.loc[0].get('ma5', 0)
        ma20_date_1 = fm_date_df.loc[0].get('ma20', 0)
        ma60_date_1 = fm_date_df.loc[0].get('ma60', 0)
        ma99_date_1 = fm_date_df.loc[0].get('ma99', 0)
        ma250_date_1 = fm_date_df.loc[0].get('ma250', 0)

        ma5 = fm_date_df.loc[1].get('ma5', 0)
        ma20 = fm_date_df.loc[1].get('ma20', 0)
        ma60 = fm_date_df.loc[1].get('ma60', 0)
        ma99 = fm_date_df.loc[1].get('ma99', 0)
        ma250 = fm_date_df.loc[1].get('ma250', 0)

        if ma5 >= ma5_date_1:
            cnt += 1
        if ma20 >= ma20_date_1:
            cnt += 1
        if ma60 >= ma60_date_1:
            cnt += 1
        if ma99 >= ma99_date_1:
            cnt += 1
        if ma250 >= ma250_date_1:
            cnt += 1

        if close >= ma5 and close >= ma20 and close >= ma60 and close >= ma99 and close >= ma250:
            over_ma = 1
        return cnt, over_ma


    def get_choice_block_mom_value(self, row):
        """Look up the sub-industry momentum value at both pattern endpoints.

        Returns ``(begin_mom_value, end_mom_value)``; either falls back to 0
        when the sub-industry or its momentum record is missing.
        """
        sub_industry = ""
        code = row['code']
        begin_date = row['pattern_begin_date']
        end_date = row['pattern_end_date']
        begin_mom_value = 0
        end_mom_value = 0
        try:
            stable_cursor = DB_CONN['stable'].find(
                {'code': code, 'index': False},
                projection={'_id': False},
                batch_size=1000)

            sub_industry = stable_cursor[0]['sub_industry']
        # FIX: was a bare ``except:`` that also swallowed KeyboardInterrupt /
        # SystemExit; only "record or field missing" is expected here.
        except (IndexError, KeyError, TypeError):
            print(f"get_sub_industry error code:{code}")

        # NOTE(review): ``cursor.count()`` was removed in pymongo 4.x —
        # confirm the deployed pymongo version before upgrading.
        momentum_cursor = DB_CONN['momentum'].find(
            {'name': sub_industry, 'date': begin_date, 'type': 'sub_industry', "origin": 'choice'},
            projection={'code_list': False, '_id': False})
        if momentum_cursor.count() > 0:
            begin_mom_value = momentum_cursor[0]['momentum_value']

        momentum_cursor = DB_CONN['momentum'].find(
            {'name': sub_industry, 'date': end_date, 'type': 'sub_industry', "origin": 'choice'},
            projection={'code_list': False, '_id': False})
        if momentum_cursor.count() > 0:
            end_mom_value = momentum_cursor[0]['momentum_value']

        return begin_mom_value, end_mom_value


    def get_rs(self, code, date):
        """Return ``(short_rs, long_rs)`` relative-strength scores on ``date``.

        short_rs = max(rs_5, rs_10, rs_20); long_rs = max(rs_60, rs_120,
        rs_250).  A factor column missing from the factor frame counts as 0.
        """
        fm_date_df = self.fm.get_single_stock_factors(code, "rs", False, date, date)

        def rs_value(col):
            # Same semantics as the original column-by-column checks:
            # absent column -> 0, present column -> value from the single row.
            return fm_date_df.loc[0][col] if col in fm_date_df.columns else 0

        short_rs = max(rs_value('rs_5'), rs_value('rs_10'), rs_value('rs_20'))
        long_rs = max(rs_value('rs_60'), rs_value('rs_120'), rs_value('rs_250'))

        return short_rs, long_rs

    def fill_value(self, data_df):
        """Enrich ``data_df`` in place with all computed feature columns.

        Also dumps the result to ``perfect_pattern_data_<date>.csv`` for
        manual inspection.
        """
        # Pattern duration in trading days.
        data_df['last_days'] = data_df.apply(lambda row: get_diff_dates(row['code'], row['pattern_begin_date'], row['pattern_end_date']), axis=1)

        # Interval statistics: mean/std of abs change, of change differences,
        # of ATR differences, plus breakout-day change, >=4% day share and
        # high-to-high drift.
        data_df[['mean_abs_close_ratio', 'noun_abs_close_ratio',
                 'mean_difference_ratio', 'noun_difference_ratio',
                 'mean_atr_1_ratio', 'noun_atr_1_ratio',
                 'close_ratio', 'over_4_days_ratio', 'diff_high'
                 ]] = \
            data_df.apply(lambda row: self.calc_mean_noun(row), axis=1, result_type="expand")

        # Moving-average alignment at the pattern end.
        data_df[['cur_MA_up', 'over_ma']] = data_df.apply(lambda row: self.calc_ma_up(self.fm, row['code'], False, row['pattern_end_date']), axis=1, result_type="expand")

        # Sub-industry momentum at both endpoints.
        data_df[['block_begin_mom_value', 'block_end_mom_value']] = data_df.apply(lambda row: self.get_choice_block_mom_value(row), axis=1, result_type="expand")

        # Relative strength at both endpoints.
        data_df[['begin_short_rs', 'begin_long_rs']] = data_df.apply(lambda row: self.get_rs(row['code'], row['pattern_begin_date']), axis=1, result_type="expand")
        data_df[['end_short_rs', 'end_long_rs']] = data_df.apply(lambda row: self.get_rs(row['code'], row['pattern_end_date']), axis=1, result_type="expand")

        data_df.to_csv(f'perfect_pattern_data_{self.cur_date}.csv')
        return

    def write_pattern_data_to_db(self, data_df):
        """Bulk-upsert the enriched rows into MongoDB.

        NOTE(review): rows are keyed by their positional DataFrame index, so
        reordering or inserting spreadsheet rows remaps existing documents —
        consider keying on (code, buy_point_date) instead; verify before
        changing.
        """
        update_requests = list()
        collection = DB_CONN['perfect_pattern_strategy_option_stocks']
        # code+buy_point_date index speeds up later lookups.
        collection.create_index([('code', 1), ('buy_point_date', 1)])
        for index, row in data_df.iterrows():
            update_requests.append(
                UpdateOne(
                    {'index': index},
                    {'$set': dict(row)},
                    upsert=True)
            )
        # Batched, unordered write for efficiency.
        if len(update_requests) > 0:
            start_time = time.time()
            update_result = collection.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('保存完美形态学复盘数据到数据集：%s，插入：%4d条, 更新：%4d条,耗时：%.3f 秒' %
                  (collection.name, update_result.upserted_count, update_result.modified_count,
                   (end_time - start_time)),
                  flush=True)

        return

    def read_pattern_data_from_db(self):
        """Read back all stored pattern rows, ordered by buy-point date."""
        collection = DB_CONN['perfect_pattern_strategy_option_stocks']

        data_cursor = collection.find(
            sort=[('buy_point_date', ASCENDING)],
            projection={'_id': False})
        data_df = DataFrame(list(data_cursor))
        return data_df

    def calc_Y(self, success):
        """Map the manual ``success`` label to a training target.

        1 or 2 -> 1 (positive), 0 -> 0 (negative), anything else (missing /
        not yet reviewed) -> -1, which marks the row as "to be predicted".
        """
        if success in (1, 2):
            return 1
        if success == 0:
            return 0
        return -1

    def data_process_for_ai(self):
        """Train a classifier on labelled rows and score the unlabelled ones.

        Labelled rows (Y >= 0) are split 70/30 for train/test; a
        classification report is printed for the test split, then the fitted
        model predicts the unlabelled rows (Y == -1) with class probabilities.
        """
        data_df = self.read_pattern_data_from_db()
        data_df['Y'] = data_df.apply(lambda row: self.calc_Y(row['success']), axis=1)

        ai_df = data_df.copy()
        ai_df.to_csv("ai.csv")

        # Feature matrix shared by train/test/predict sets.
        scale_x = ai_df[['mean_abs_close_ratio', 'noun_abs_close_ratio',
                        'mean_difference_ratio', 'noun_difference_ratio',
                        'mean_atr_1_ratio', 'noun_atr_1_ratio',
                        'close_ratio', 'last_days', 'cur_MA_up', 'diff_high',
                        'over_4_days_ratio', 'over_ma',
                        'block_begin_mom_value', 'block_end_mom_value',
                        'begin_short_rs', 'begin_long_rs', 'end_short_rs', 'end_long_rs']]

        # Labelled subset only.
        test_x = scale_x[ai_df['Y'] >= 0]
        test_y = ai_df.loc[ai_df['Y'] >= 0, 'Y']

        x_train, x_test, y_train, y_test = train_test_split(test_x, test_y, test_size=0.3)

        # RandomForest chosen after trying LogisticRegression, GaussianNB,
        # KNeighbors, DecisionTree and SVC; GridSearchCV was used offline to
        # pick these hyper-parameters.
        model = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=15, min_samples_split=15, min_samples_leaf=5)

        model.fit(x_train, y_train)
        # Ground truth vs prediction on the held-out split.
        expected = y_test
        predicted = model.predict(x_test)
        probed = model.predict_proba(x_test)

        len_predicted = len(predicted)
        print(metrics.classification_report(expected, predicted))
        print(metrics.confusion_matrix(expected, predicted))

        print("------------对未标注的数据进行预测----------------------------")
        # Rows still awaiting a manual label.
        expect_x = scale_x[ai_df['Y'] == -1]
        exp_y_df = ai_df.loc[ai_df['Y'] == -1, 'Y']
        # Predicted class and class probabilities for the unlabelled rows.
        predict_y = model.predict(expect_x)
        prob_y = model.predict_proba(expect_x)
        len_predict_y = len(predict_y)
        # Join predictions back onto the identifying columns.
        pre_y_df = DataFrame(data_df.loc[exp_y_df.index, ['name', 'Y', 'buy_point_date']].values, columns=['name', 'Y', 'buy_point_date'], index=range(len_predict_y))
        pre_y_ser = Series(predict_y)
        pre_prob_y_df = DataFrame(prob_y, index=range(len_predict_y), columns=['prob_0', 'prob_1'])
        pre_prob_y_df['probed_1'] = pre_prob_y_df.apply(lambda row: round(row['prob_1'], 2), axis=1)

        pre_y_df.insert(1, 'expY', pre_y_ser)
        pre_y_df.insert(2, 'expY1_prob_1', pre_prob_y_df['probed_1'])
        print(pre_y_df)

        return


    def get_option_stocks(self):
        """Run the full pipeline: read, enrich, persist, then train/predict.

        Set ``flag`` to 0 to skip the (slow) re-computation and only rerun
        the model on the data already stored in MongoDB.
        """
        flag = 1
        if flag == 1:
            # step1: load the spreadsheet into the base DataFrame.
            data_df = self.read_basic_info()

            # step2: compute and attach all feature columns.
            start_time = time.time()
            self.fill_value(data_df)
            end_time = time.time()
            print(f"填充数据耗时：{round(end_time - start_time,2)}秒")
            # step3: persist to MongoDB.
            self.write_pattern_data_to_db(data_df)

        # step4: train and predict.
        self.data_process_for_ai()

        return

if __name__ == '__main__':
    # Widen pandas console output so the wide feature DataFrames print
    # without truncation.
    for display_opt in ('display.width', 'display.max_columns', 'display.max_colwidth'):
        pd.set_option(display_opt, 130)

    pool = PerfectPatternStockPool("Pattern", None, None, 1)
    pool.get_option_stocks()
