#  -*- coding: utf-8 -*-

from strategy.stock_pool.base_stock_pool import BaseStockPool
from factor.factor_module import FactorModule
from data.data_module import DataModule
from util.stock_util import judge_code_trading_date,get_diff_dates,calc_negative_diff_dates,get_sub_industry,get_choice_block_mom_value,get_choice_block_mom_rank
from pandas import DataFrame,Series
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import scipy.signal as signal
import time
from sklearn import linear_model
import pickle
import os
from util.database import DB_CONN
from pymongo import UpdateOne,ASCENDING
from pathlib import Path
import talib as ta
from datetime import datetime
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures,scale
from sklearn.pipeline import Pipeline
import matplotlib.patches as mpatches
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from numpy import interp
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from itertools import cycle
from sklearn.ensemble import RandomForestClassifier
from util.database import base_code_path
"""
形态学人工复盘：
1，读取形态学人工复盘的数据，通过其中的基本信息去计算填补其他信息；
2，补充完成后，存入数据库
3，在visual模块去展示部分数据（第一涨幅位-止损位-比例-MA20偏离-RSI-RS强度）
4，定期去更新之前一些字段
"""

class PatternStockPool(BaseStockPool):
    """Manual pattern-review stock pool.

    Reads hand-maintained chart-pattern annotations from an Excel sheet,
    enriches each row with computed indicators (RS ranks, MA bias, RSI,
    ATR, volume ratios, ...), persists the result to MongoDB and feeds it
    into an ML classification step.
    """

    # Shared, class-level service handles: created once at class-definition
    # time and reused by every method.
    dm = DataModule()
    fm = FactorModule()
    # Snap "today" to the nearest valid trading date, once, at import time.
    # NOTE(review): a long-running process keeps a stale cur_date — confirm
    # this module is only ever run as a short-lived script.
    cur_date = datetime.now().strftime('%Y-%m-%d')
    cur_date = judge_code_trading_date(date=cur_date)

    def read_basic_info(self):
        """Load the manual pattern-review sheet into a DataFrame.

        Reads the 'PatternStrategy' sheet from ManualStockPool.xlsx, renames
        the Chinese column headers to internal English field names, and
        normalises the date/code/block columns to plain strings (missing
        Excel cells become the literal string 'nan', which downstream code
        relies on).

        Returns:
            DataFrame with one row per manually reviewed pattern.
        """
        self.file = Path(f"{base_code_path}/ManualStockPool.xlsx")

        # BUGFIX: the previous call passed an invalid 'sheencoding' keyword
        # (typo of 'encoding', which read_excel does not accept either);
        # modern pandas raises TypeError on unknown keyword arguments.
        data_df = pd.read_excel(self.file, sheet_name='PatternStrategy', dtype=object)

        change_col = {
            "形态起始时间":'pattern_begin_date',
            "首次复盘发现突破颈线时间":'find_date',
            "代码":'code',
            "名称":'name',
            "形态底部值":'down_value',
            "形态顶部值":'up_value',
            "涨幅计算起始值":"cal_begin_value",
            "突破时的颈线位值":'breaking_neck_value',
            "趋势启动时间":'trending_begin_date',
            "趋势结束时间":'trending_end_date',
            "形态类型":"pattern_type",
            "二次追盘起始位置":"2nd_trace_begin_date",
            "二次追盘结束位置":"2nd_trace_end_date",
            "是否已标注":"marked",
            "近期是否要跟踪":"tracked",
            "所属板块":"block",
            "换手率":"turnover_ratio",
            "逻辑分类":"logic_type",
            "逻辑分析":"logic_analyze"
        }
        data_df.rename(columns=change_col,inplace=True)

        # Excel yields Timestamp/number objects; downstream comparisons are
        # string-based (including the 'nan' sentinel), so coerce uniformly.
        str_cols = ['pattern_begin_date', 'find_date', 'trending_begin_date',
                    'trending_end_date', '2nd_trace_begin_date',
                    '2nd_trace_end_date', 'code', 'block']
        for col in str_cols:
            data_df[col] = data_df[col].astype(str)

        return data_df

    def cal_exp_price_ratio(self, dm, code, date, expect_price):
        """Expected upside in percent from the qfq close on *date* to
        *expect_price*, rounded to 2 decimals."""
        bar = dm.get_k_data(code, autype='qfq', begin_date=date, end_date=date)
        close_px = bar.loc[0]['close']
        return round(100 * (expect_price - close_px) / close_px, 2)

    def cal_stop_loss_ratio(self, dm, code, date, stop_loss_price):
        """Stop-loss distance in percent (absolute value) from the qfq close
        on *date* to *stop_loss_price*, rounded to 2 decimals."""
        bar = dm.get_k_data(code, autype='qfq', begin_date=date, end_date=date)
        close_px = bar.loc[0]['close']
        distance = abs(100 * (stop_loss_price - close_px) / close_px)
        return round(distance, 2)

    def calc_bias_ma_20(self,dm,fm,code,date):
        """Percent deviation of the hfq close on *date* from its 20-day MA.

        Returns 0 when either the k-line row or the MA factor row is missing.
        """
        try:
            date_df = dm.get_k_data(code,autype='hfq',begin_date=date,end_date=date)
            close = date_df.loc[0]['close']
            fm_date_df = fm.get_single_stock_factors(code,"hfq_ma",False,date,date)
            hfq_ma_20 = fm_date_df.loc[0]['ma20']
            return round(100*(close - hfq_ma_20)/hfq_ma_20,2)
        except Exception:
            # BUGFIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; missing data still yields 0.
            return 0

    def calc_rsi(self,dm,code,date):
        """6-period RSI for *code* on *date* over qfq prices, rounded to 2 dp.

        Raises if *date* is absent from the k-line history (callers pass
        trading dates only).
        """
        date_df = dm.get_k_data(code,autype='qfq')

        closes = date_df['close'].astype(float).to_numpy()
        date_df['RSI_6'] = ta.RSI(closes, timeperiod=6)
        # BUGFIX: float(Series) relied on deprecated single-element scalar
        # coercion (removed in newer pandas); select the scalar explicitly.
        date_rsi_6 = round(float(date_df.loc[date_df['date'] == date, 'RSI_6'].iloc[0]), 2)
        return date_rsi_6

    def calc_atr(self,dm,code,date):
        """20-period ATR for *code* on *date* over qfq prices, rounded to 2 dp.

        Raises if *date* is absent from the k-line history (callers pass
        trading dates only).
        """
        date_df = dm.get_k_data(code,autype='qfq')

        close = date_df['close'].astype(float).to_numpy()
        high = date_df['high'].astype(float).to_numpy()
        low = date_df['low'].astype(float).to_numpy()
        date_df['atr_20'] = ta.ATR(high, low, close, timeperiod=20)
        # BUGFIX: float(Series) relied on deprecated single-element scalar
        # coercion (removed in newer pandas); select the scalar explicitly.
        date_atr_20 = round(float(date_df.loc[date_df['date'] == date, 'atr_20'].iloc[0]), 2)
        return date_atr_20

    def calc_avg_rs(self,fm,code,date):
        """Average of the rs_60 / rs_120 / rs_250 factors for *code* on *date*.

        A value of -1 marks "no data" for that horizon and is excluded.
        Returns 0 when the factor row is missing or every horizon is -1.
        """
        avg_rs = 0
        try:
            row = fm.get_single_stock_factors(code,"rs",False,date,date).loc[0]
            # BUGFIX: the old code shrank the divisor for each -1 sentinel but
            # still added the -1 into the numerator, skewing the average
            # downward; exclude sentinel values from both sum and count.
            valid = [v for v in (row['rs_60'], row['rs_120'], row['rs_250']) if v != -1]
            if valid:
                avg_rs = round(sum(valid)/len(valid),2)
        except Exception:
            print(f"代码：{code},没获取到rs数据！")
        return avg_rs

    def calc_short_rs(self,fm,code,is_index,date):
        """Count how many short-horizon RS factors (rs_5/rs_10/rs_20) are >= 90.

        Returns 0..3; also 0 when the factor row is unavailable.
        """
        cnt = 0
        try:
            row = fm.get_single_stock_factors(code, "rs", is_index, date, date).loc[0]
            for field in ('rs_5', 'rs_10', 'rs_20'):
                if row.get(field, 0) >= 90:
                    cnt += 1
        except Exception:
            # BUGFIX: was a bare 'except:' that assigned rs_* locals which
            # were never read (dead code); a missing row just counts as 0.
            pass
        return cnt

    def calc_long_rs(self,fm,code,is_index,date):
        """Count how many long-horizon RS factors (rs_60/rs_120/rs_250) are >= 80.

        Returns 0..3; also 0 when the factor row is unavailable.
        """
        cnt = 0
        try:
            row = fm.get_single_stock_factors(code, "rs", is_index, date, date).loc[0]
            for field in ('rs_60', 'rs_120', 'rs_250'):
                if row.get(field, 0) >= 80:
                    cnt += 1
        except Exception:
            # BUGFIX: was a bare 'except:' that assigned rs_* locals which
            # were never read (dead code); a missing row just counts as 0.
            pass
        return cnt

    def calc_ma_up(self, fm, code, is_index, date):
        """Bullish moving-average alignment check.

        Returns 1 when MA5/MA20/MA99/MA250 have each risen over the last
        three sessions AND are stacked bullishly (MA5 >= MA20 >= MA99 >=
        MA250) on *date*; 0 otherwise; -1 when the factor data cannot be
        read (row 0 is the oldest of the three sessions).
        """
        try:
            begin_date = calc_negative_diff_dates(code=code, is_index=is_index, date=date, delta_days=-2)
            ma_df = fm.get_single_stock_factors(code, "hfq_ma", is_index, begin_date, date)

            two_ago, one_ago, today = ma_df.loc[0], ma_df.loc[1], ma_df.loc[2]
            cols = ('ma5', 'ma20', 'ma99', 'ma250')

            rising = all(
                today.get(c, 0) >= one_ago.get(c, 0) >= two_ago.get(c, 0)
                for c in cols
            )
            stacked = (today.get('ma5', 0) >= today.get('ma20', 0)
                       >= today.get('ma99', 0) >= today.get('ma250', 0))
            return 1 if rising and stacked else 0
        except Exception:
            return -1

    def calc_vol_ma_up(self, fm, code, is_index, date):
        """Bullish *volume* moving-average alignment check (the old comment
        said price MAs — these are volume MAs).

        Returns 1 when volume MA5 and MA20 have each risen over the last
        three sessions AND MA5 >= MA20 on *date*; 0 otherwise; -1 when the
        factor data cannot be read (row 0 is the oldest session).
        """
        try:
            begin_date = calc_negative_diff_dates(code=code, is_index=is_index, date=date, delta_days=-2)
            vol_df = fm.get_single_stock_factors(code, "vol_ma", is_index, begin_date, date)

            two_ago, one_ago, today = vol_df.loc[0], vol_df.loc[1], vol_df.loc[2]

            rising = all(
                today.get(c, 0) >= one_ago.get(c, 0) >= two_ago.get(c, 0)
                for c in ('ma5', 'ma20')
            )
            stacked = today.get('ma5', 0) >= today.get('ma20', 0)
            return 1 if rising and stacked else 0
        except Exception:
            return -1

    def calc_vol_ratio(self,row):
        """Ratio of average volume near the pattern top vs near the bottom.

        Scans trading days between pattern_begin_date and find_date and
        averages the volume of closes within 5% of up_value (top) and of
        down_value (bottom).  Returns 0 when either side has no samples or
        when no k-line data exists for the window.
        """
        up_value = row['up_value']
        down_value = row['down_value']
        # BUGFIX: vol_ratio was only bound inside the `if`, so an empty
        # k-line window raised UnboundLocalError at the return statement.
        vol_ratio = 0
        df_daily = self.dm.get_k_data(row['code'], autype='qfq', begin_date=row['pattern_begin_date'], end_date=row['find_date'])
        if df_daily.index.size > 0:
            # BUGFIX: set_index's second positional argument ('drop') is
            # keyword-only in modern pandas; pass it explicitly.
            df_daily.set_index(['date'], drop=True, inplace=True)

            df_trading_daily = df_daily.loc[df_daily.is_trading == True, :]
            df_trading_daily_copy = df_trading_daily.copy()

            up_cnt = 0
            total_up_vol = 0
            down_cnt = 0
            total_down_vol = 0
            avg_up_vol = 0
            avg_down_vol = 0
            for index, data in df_trading_daily_copy.iterrows():
                if abs(data['close'] - up_value)/up_value < 0.05:
                    total_up_vol += data['volume']
                    up_cnt += 1
                if abs(data['close'] - down_value)/down_value < 0.05:
                    total_down_vol += data['volume']
                    down_cnt += 1
            if up_cnt != 0:
                avg_up_vol = round(total_up_vol/up_cnt,2)
            if down_cnt != 0:
                avg_down_vol = round(total_down_vol/down_cnt,2)
            if avg_down_vol != 0:
                vol_ratio = round(avg_up_vol/avg_down_vol,2)

        return vol_ratio

    def get_vol_ratio_5(self,row):
        """5-day volume-ratio factor for row['code'] on row['find_date'].

        Returns 0 (and logs) when the factor is unavailable.
        """
        try:
            vol_df = self.fm.get_single_stock_factors(row['code'], 'vol_ratio', False, row['find_date'], row['find_date'])
            return vol_df.loc[0]['vol_ratio_5']
        except Exception:
            # BUGFIX: was a bare 'except:'; keep the best-effort fallback but
            # stop swallowing KeyboardInterrupt/SystemExit.
            print(f"get_vol_ratio_5 error code: {row['code']}")
            return 0

    def div_ratio(self, row):
        """Risk/reward ratio: exp_ratio divided by stop_ratio, rounded to
        2 decimals; 0 when the stop distance is zero (avoids ZeroDivision)."""
        stop = row['stop_ratio']
        if stop == 0:
            return 0
        return round(row['exp_ratio'] / stop, 2)

    def _get_rs(self, fm, code, is_index, date, field):
        """Fetch a single RS factor *field* (e.g. 'rs_5') for *code* on
        *date*; returns 0 and logs when the factor row is unavailable.

        Consolidates six previously duplicated getters (which also used
        bare 'except:' clauses).
        """
        try:
            fm_date_df = fm.get_single_stock_factors(code, "rs", is_index, date, date)
            return fm_date_df.loc[0][field]
        except Exception:
            # Same message format as the old copies: 'rs_5' -> 'rs5', etc.
            print(f"代码：{code},没获取到{field.replace('_', '')}数据！")
            return 0

    def get_rs_5(self,fm,code,is_index,date):
        """5-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_5')

    def get_rs_10(self,fm,code,is_index,date):
        """10-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_10')

    def get_rs_20(self,fm,code,is_index,date):
        """20-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_20')

    def get_rs_60(self,fm,code,is_index,date):
        """60-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_60')

    def get_rs_120(self,fm,code,is_index,date):
        """120-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_120')

    def get_rs_250(self,fm,code,is_index,date):
        """250-day relative-strength rank (0 when missing)."""
        return self._get_rs(fm, code, is_index, date, 'rs_250')

    def calc_bias_neck_value(self, dm, code, neck_value, date):
        """Percent deviation of the qfq close on *date* from the pattern's
        neckline value, rounded to 2 decimals."""
        bar = dm.get_k_data(code, autype='qfq', begin_date=date, end_date=date)
        close_px = bar.loc[0]['close']
        return round(100 * (close_px - neck_value) / neck_value, 2)

    def fill_value(self,data_df):
        """Enrich the manual review sheet in place with computed columns.

        Mutates *data_df* (the caller reuses the same object) adding target
        prices, risk/reward ratios, MA/RSI/ATR indicators, RS ranks for the
        stock and its block at both find_date and the current date, pattern
        type codes and volume ratios.  Also dumps a debug CSV snapshot.
        Returns None.
        """

        # Target price = measured move: breakout start + pattern height.
        data_df['expect_price'] = data_df.apply(lambda row:round(row['cal_begin_value'] + row['up_value'] - row['down_value'],2),axis=1)
        # Stop-loss 3% below the breakout neckline.
        data_df['stop_loss_price'] = data_df.apply(lambda row:round(row['breaking_neck_value']*0.97,2),axis=1)
        data_df['bias_neck_value'] = data_df.apply(lambda row: self.calc_bias_neck_value(self.dm,row['code'],row['breaking_neck_value'],row['find_date']), axis=1)
        data_df['diff_dates'] = data_df.apply(lambda row: get_diff_dates(row['code'],row['pattern_begin_date'],row['find_date']), axis=1)
        data_df['exp_ratio'] = data_df.apply(lambda row: self.cal_exp_price_ratio(self.dm,row['code'], row['find_date'],row['expect_price']), axis=1)
        data_df['stop_ratio'] = data_df.apply(lambda row: self.cal_stop_loss_ratio(self.dm,row['code'], row['find_date'],row['stop_loss_price']), axis=1)
        data_df['risk_reward_ratio'] = data_df.apply(lambda row: self.div_ratio(row), axis=1)
        data_df['bias_ma20_ratio'] = data_df.apply(lambda row: self.calc_bias_ma_20(self.dm,self.fm,row['code'], row['find_date']), axis=1)
        data_df['RSI_6'] = data_df.apply(lambda row: self.calc_rsi(self.dm,row['code'], row['find_date']), axis=1)
        # NOTE(review): atr_20 is evaluated at cur_date while RSI_6 uses
        # find_date — confirm this asymmetry is intentional.
        data_df['atr_20'] = data_df.apply(lambda row: self.calc_atr(self.dm,row['code'], self.cur_date), axis=1)
        data_df['rs'] = data_df.apply(lambda row: self.calc_avg_rs(self.fm,row['code'], row['find_date']), axis=1)
        data_df['short_rs'] = data_df.apply(lambda row: self.calc_short_rs(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['long_rs'] = data_df.apply(lambda row: self.calc_long_rs(self.fm,row['code'],False, row['find_date']), axis=1)

        # Same RS counts for the stock's block (is_index=True).
        data_df['block_short_rs'] = data_df.apply(lambda row: self.calc_short_rs(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_long_rs'] = data_df.apply(lambda row: self.calc_long_rs(self.fm,row['block'],True, row['find_date']), axis=1)
        # 'cur_*' variants: the same indicators re-evaluated at today's date
        # so stale rows can be compared against the latest market state.
        data_df['cur_bias_ma20_ratio'] = data_df.apply(lambda row: self.calc_bias_ma_20(self.dm,self.fm,row['code'], self.cur_date), axis=1)
        data_df['cur_bias_neck_value'] = data_df.apply(lambda row: self.calc_bias_neck_value(self.dm,row['code'],row['breaking_neck_value'],self.cur_date), axis=1)
        data_df['cur_RSI_6'] = data_df.apply(lambda row: self.calc_rsi(self.dm,row['code'], self.cur_date), axis=1)
        data_df['cur_rs'] = data_df.apply(lambda row: self.calc_avg_rs(self.fm,row['code'], self.cur_date), axis=1)
        data_df['cur_short_rs'] = data_df.apply(lambda row: self.calc_short_rs(self.fm,row['code'], False,self.cur_date), axis=1)
        data_df['cur_long_rs'] = data_df.apply(lambda row: self.calc_long_rs(self.fm,row['code'], False,self.cur_date), axis=1)
        data_df['cur_MA_up'] = data_df.apply(lambda row: self.calc_ma_up(self.fm,row['code'], False,self.cur_date), axis=1)
        data_df['cur_block_short_rs'] = data_df.apply(lambda row: self.calc_short_rs(self.fm,row['block'],True, self.cur_date), axis=1)
        data_df['cur_block_long_rs'] = data_df.apply(lambda row: self.calc_long_rs(self.fm,row['block'],True, self.cur_date), axis=1)

        # Choice (东方财富) sub-industry momentum, at find_date and today.
        data_df['choice_block_name'] = data_df.apply(lambda row: get_sub_industry(row['code']), axis=1)
        data_df['choice_block_mom_value'] = data_df.apply(lambda row: get_choice_block_mom_value(row['choice_block_name'], row['find_date']), axis=1)
        data_df['choice_block_mom_rank'] = data_df.apply(lambda row: get_choice_block_mom_rank(row['choice_block_name'], row['find_date']), axis=1)
        data_df['cur_choice_block_mom_value'] = data_df.apply(lambda row: get_choice_block_mom_value(row['choice_block_name'], self.cur_date), axis=1)
        data_df['cur_choice_block_mom_rank'] = data_df.apply(lambda row: get_choice_block_mom_rank(row['choice_block_name'], self.cur_date), axis=1)

        # Individual RS horizons for the stock at find_date...
        data_df['rs_5'] = data_df.apply(lambda row: self.get_rs_5(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['rs_10'] = data_df.apply(lambda row: self.get_rs_10(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['rs_20'] = data_df.apply(lambda row: self.get_rs_20(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['rs_60'] = data_df.apply(lambda row: self.get_rs_60(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['rs_120'] = data_df.apply(lambda row: self.get_rs_120(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['rs_250'] = data_df.apply(lambda row: self.get_rs_250(self.fm,row['code'],False, row['find_date']), axis=1)

        # ...at the current date...
        data_df['cur_rs_5'] = data_df.apply(lambda row: self.get_rs_5(self.fm,row['code'],False, self.cur_date), axis=1)
        data_df['cur_rs_10'] = data_df.apply(lambda row: self.get_rs_10(self.fm,row['code'],False, self.cur_date), axis=1)
        data_df['cur_rs_20'] = data_df.apply(lambda row: self.get_rs_20(self.fm,row['code'],False, self.cur_date), axis=1)
        data_df['cur_rs_60'] = data_df.apply(lambda row: self.get_rs_60(self.fm,row['code'],False, self.cur_date), axis=1)
        data_df['cur_rs_120'] = data_df.apply(lambda row: self.get_rs_120(self.fm,row['code'],False, self.cur_date), axis=1)
        data_df['cur_rs_250'] = data_df.apply(lambda row: self.get_rs_250(self.fm,row['code'],False, self.cur_date), axis=1)

        # ...and for the stock's block at find_date.
        data_df['block_rs_5'] = data_df.apply(lambda row: self.get_rs_5(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_rs_10'] = data_df.apply(lambda row: self.get_rs_10(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_rs_20'] = data_df.apply(lambda row: self.get_rs_20(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_rs_60'] = data_df.apply(lambda row: self.get_rs_60(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_rs_120'] = data_df.apply(lambda row: self.get_rs_120(self.fm,row['block'],True, row['find_date']), axis=1)
        data_df['block_rs_250'] = data_df.apply(lambda row: self.get_rs_250(self.fm,row['block'],True, row['find_date']), axis=1)

        # MA-alignment flags (1 bullish / 0 not / -1 no data).
        data_df['MA_up'] = data_df.apply(lambda row: self.calc_ma_up(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['block_MA_up'] = data_df.apply(lambda row: self.calc_ma_up(self.fm,row['block'],True, row['find_date']), axis=1)

        data_df['vol_MA_up'] = data_df.apply(lambda row: self.calc_vol_ma_up(self.fm,row['code'],False, row['find_date']), axis=1)
        data_df['vol_block_MA_up'] = data_df.apply(lambda row: self.calc_vol_ma_up(self.fm,row['block'],True, row['find_date']), axis=1)

        data_df['t_raise_ratio'] = data_df.apply(lambda row: self.calc_t_raise_ratio(self.dm,row['code'],row['trending_begin_date'], row['trending_end_date']), axis=1)
        # Encode pattern types numerically: positive = bottom patterns,
        # negative = flag patterns.  Unlisted types are left as NaN.
        data_df.loc[data_df['pattern_type'] == '多重底','pattern_type_value'] = 1
        data_df.loc[data_df['pattern_type'] == '头肩底','pattern_type_value'] = 2
        data_df.loc[data_df['pattern_type'] == '双底','pattern_type_value'] = 3
        data_df.loc[data_df['pattern_type'] == '弧形底','pattern_type_value'] = 4
        data_df.loc[data_df['pattern_type'] == '三重底','pattern_type_value'] = 5
        data_df.loc[data_df['pattern_type'] == '下降三角','pattern_type_value'] = 6
        data_df.loc[data_df['pattern_type'] == '水平旗形','pattern_type_value'] = -1
        data_df.loc[data_df['pattern_type'] == '下飘旗形','pattern_type_value'] = -2
        data_df['up_down_vol_ratio'] = data_df.apply(lambda row: self.calc_vol_ratio(row),axis=1)

        data_df['vol_ratio'] = data_df.apply(lambda row: self.get_vol_ratio_5(row),axis=1)
        # Debug snapshot; hard-coded filename, overwritten on each run.
        data_df.to_csv('data_1125.csv')
        return

    def write_pattern_data_to_db(self,data_df):
        """Upsert the enriched pattern rows into MongoDB.

        Each DataFrame row is upserted keyed by its positional index, so
        re-running the pipeline updates existing documents in place.
        NOTE(review): keying on positional index assumes the Excel row order
        is stable between runs — confirm.
        """
        update_requests = []
        collection = DB_CONN['pattern_strategy_option_stocks']
        # Secondary index for readers that query by code + find_date.
        collection.create_index([('code', 1),('find_date', 1)])
        # BUGFIX: the upsert filter below queries by 'index', which the
        # code+find_date index cannot serve; index that field as well so the
        # bulk upsert does not do a collection scan per document.
        collection.create_index([('index', 1)])
        for index, row in data_df.iterrows():
            update_requests.append(
                UpdateOne(
                    {'index': index},
                    {'$set': dict(row)},
                    upsert=True)
            )
        # Unordered bulk write for throughput.
        if len(update_requests) > 0:
            start_time = time.time()
            update_result = collection.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('保存形态学复盘数据到数据集：%s，插入：%4d条, 更新：%4d条,耗时：%.3f 秒' %
                  (collection.name, update_result.upserted_count, update_result.modified_count,
                   (end_time - start_time)),
                  flush=True)

        return

    def read_pattern_data_from_db(self):
        """Load all saved pattern rows as a DataFrame, oldest find_date
        first, excluding the Mongo _id column."""
        collection = DB_CONN['pattern_strategy_option_stocks']
        cursor = collection.find(
            sort=[('find_date', ASCENDING)],
            projection={'_id': False})
        return DataFrame(list(cursor))

    def calc_t_raise_ratio(self,dm,code,t_bg_date,t_end_date):
        """Percent gain of the trend leg from t_bg_date to t_end_date.

        Dates are strings; 'nan' (from astype(str) over missing Excel cells)
        means the trend is not annotated and yields 0.
        """
        t_raise_ratio = 0
        if t_bg_date != 'nan' and t_end_date != 'nan':
            date_df = dm.get_k_data(code,autype='qfq',begin_date=t_bg_date,end_date=t_end_date)
            # BUGFIX/robustness: an empty window previously raised IndexError
            # on iloc[0]; treat it as "no data" and keep the 0 default.
            if date_df.index.size > 0:
                t_bg_close = date_df.iloc[0]['close']
                t_end_close = date_df.iloc[-1]['close']
                t_raise_ratio = round(100*(t_end_close - t_bg_close)/t_bg_close,2)

        return t_raise_ratio

    def calc_Y(self,find_date,t_bg_date):
        """Label a sample: 1 when a trend start date is annotated, else 0.

        BUGFIX(dead code): both branches of the old
        `find_date == t_bg_date` comparison assigned Y = 1, so the
        comparison had no effect; this simplified form is behaviorally
        identical.
        NOTE(review): the else branch was probably meant to produce a
        different label (e.g. late discovery) — confirm the intended
        labeling scheme; find_date is currently unused.
        """
        return 0 if t_bg_date == 'nan' else 1

    def data_process_for_ai(self):
        """Train a classifier on labeled pattern rows and predict the rest.

        Reads stored patterns, labels them via calc_Y, trains a random
        forest on the manually marked rows, reports test metrics, writes a
        test-set comparison CSV, and prints predictions for unmarked rows.
        NOTE(review): with calc_Y as written every annotated row is labeled
        1, so the classes may be extremely imbalanced — confirm labeling
        before trusting the metrics.  Returns None.
        """
        data_df = self.read_pattern_data_from_db()
        data_df['Y'] = data_df.apply(lambda row: self.calc_Y(row['find_date'],row['trending_begin_date']), axis=1)
        #print(data_df.head())
        # Keep only bottom patterns (positive type codes) with RS data.
        ai_df = data_df.loc[(data_df['pattern_type_value'] > 0) & (data_df['rs'] > 0)]
        #ai_df.to_csv("ai.csv")
        #print(ai_df.head(),ai_df.index.size)
        # Feature matrix shared by train/test/predict splits.
        # NOTE(review): despite the name, no scaling is actually applied
        # here (fine for tree models; revisit if switching to SVC/KNN).
        scale_x = ai_df[['diff_dates','bias_ma20_ratio','RSI_6','rs','vol_ratio','MA_up','short_rs','long_rs']]
        #print(scale_x)
        # Manually marked rows form the supervised set.
        test_x = scale_x[ai_df['marked'] == 1]
        test_y = ai_df.loc[ai_df['marked'] == 1,'Y']
        #print(test_x.size)

        # Random split (no fixed random_state, so results vary per run).
        x_train, x_test, y_train, y_test = train_test_split(test_x, test_y, test_size=0.3)

        # Alternative models kept for reference from earlier experiments.
        #model = LogisticRegression()
        #model = GaussianNB()
        #model = KNeighborsClassifier()
        #model = DecisionTreeClassifier(min_samples_split=15,min_samples_leaf=5)
        model = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=15, min_samples_split=15,min_samples_leaf=5)
        #model = GridSearchCV(rf_model,n_jobs=-1, param_grid={'n_estimators':np.arange(30,200), 'max_depth':np.arange(3,15),'min_samples_split':np.arange(5,20),'min_samples_leaf':np.arange(3,10)})
        #model = SVC(probability=True)

        model.fit(x_train, y_train)
        # Ground-truth labels of the held-out test split.
        expected = y_test
        # Predicted labels and class probabilities for the test split.
        predicted = model.predict(x_test)
        probed = model.predict_proba(x_test)
        #print(probed)

        len_predicted = len(predicted)
        print(metrics.classification_report(expected, predicted))
        print(metrics.confusion_matrix(expected, predicted))

        # Build a side-by-side comparison CSV for the test split.
        probed_y_df = DataFrame(probed, index=range(len_predicted), columns=['prob_0', 'prob_1'])
        probed_y_df['probed_1'] = probed_y_df.apply(lambda  row:round(row['prob_1'],2),axis=1)
        #probed_y_df['probed_2'] = probed_y_df.apply(lambda  row:round(row['prob_2'],2),axis=1)

        test_y_pre_df = DataFrame(data_df.loc[y_test.index, ['name','Y','find_date','t_raise_ratio']].values,columns=['name','Y','find_date','t_raise_ratio'], index=range(len_predicted))
        test_y_pre_df.insert(1, 'predictedY', Series(predicted))
        test_y_pre_df.insert(2, 'probed_1', probed_y_df['probed_1'])
        #test_y_pre_df.insert(3, 'probed_2', probed_y_df['probed_2'])
        test_y_pre_df.to_csv('测试集预测结果对比.csv')

        print("------------对未标注的数据进行预测----------------------------")
        # Unmarked rows are the prediction set.
        expect_x = scale_x[ai_df['marked'] == 0]
        exp_y_df = ai_df.loc[ai_df['marked'] ==0,'Y']
        # Predict labels and probabilities for the unmarked rows.
        predict_y = model.predict(expect_x)
        prob_y = model.predict_proba(expect_x)
        len_predict_y = len(predict_y)
        # Pull the descriptive columns for the unmarked rows back out of
        # the original frame for display alongside the predictions.
        pre_y_df = DataFrame(data_df.loc[exp_y_df.index, ['name','Y','find_date','t_raise_ratio']].values, columns=['name','Y','find_date','t_raise_ratio'],index=range(len_predict_y))
        pre_y_ser = Series(predict_y)
        pre_prob_y_df = DataFrame(prob_y, index=range(len_predict_y), columns=['prob_0', 'prob_1'])
        pre_prob_y_df['probed_1'] = pre_prob_y_df.apply(lambda  row:round(row['prob_1'],2),axis=1)

        pre_y_df.insert(1, 'expY1', pre_y_ser)
        pre_y_df.insert(2, 'expY1_prob_1', pre_prob_y_df['probed_1'])
        #pre_y_df.insert(3, 'expY1_prob_2', pre_prob_y_df['prob_2'])
        print(pre_y_df)


        return


    def get_option_stocks(self):
        """Full pipeline entry point.

        Reads the Excel review sheet, enriches it, persists it to MongoDB,
        then runs the ML labeling/prediction step.  `flag` is a manual
        developer toggle: set it to anything other than 1 to skip stages
        1-3 and only rerun the AI step on already-stored data.
        """
        flag = 1
        if flag == 1:
            #step1 read the sheet from Excel into the base data_df layout
            data_df = self.read_basic_info()

            #step2 enrich data_df with computed columns (mutates in place)
            self.fill_value(data_df)

            #step3 persist to MongoDB
            self.write_pattern_data_to_db(data_df)

        #step4 ML processing of the stored data
        self.data_process_for_ai()

        return

if __name__ == '__main__':
    # Widen pandas display so the prediction tables print unclipped.
    for option in ('display.width', 'display.max_columns', 'display.max_colwidth'):
        pd.set_option(option, 130)

    pool = PatternStockPool("Pattern", None, None, 1)
    pool.get_option_stocks()
