# -*- coding: utf-8 -*-
import time

import pandas as pd
import numpy as np
import pymysql
from pandas import DataFrame, concat
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB, BaseDiscreteNB, MultinomialNB
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report, auc, roc_curve
from sklearn.cluster import KMeans  # 引入KMeans
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm  # 字体管理器

from TuShare.TUShareObject import TUShareObject


# Stock codes to process, one per line in codes.txt.
# BUGFIX: readlines() keeps the trailing '\n'; strip it so the codes are not
# sent to the TuShare API with a newline suffix.
codes = []
with open("codes.txt", mode='r', encoding='utf8') as fi:
    codes = [line.strip() for line in fi if line.strip()]

# Quarter-start trading dates used when downloading quarterly price snapshots.
dates = ['20180102', '20180402', '20180702', '20181008', '20190102', '20190401', '20190701', '20191008', '20200102']
# Subset of the quarter dates on which the predicted picks are evaluated.
predict_price_dates = ['20190401', '20190701', '20191008', '20200102']

def rename_cols(datas: DataFrame) -> None:
    """Normalise column names in-place by collapsing every ")_" into "_".

    The Excel exports separate the Chinese label from the identifier with
    an underscore (e.g. "...)_EPS"); dropping the closing parenthesis in
    front of the separator makes the names match the identifiers used
    elsewhere in this module.

    Args:
        datas: frame whose columns are renamed in place; returns None.
    """
    datas.rename(lambda x: x.replace(")_", "_"), axis="columns", inplace=True)

def plot_roc(labels, predict_prob):
    """Plot the ROC curve of *predict_prob* against *labels* and show it."""
    fpr, tpr, _thresholds = roc_curve(labels, predict_prob)
    area = auc(fpr, tpr)
    plt.title('ROC')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % area)
    plt.legend(loc='lower right')
    # chance diagonal for reference
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    plt.show()

def main():
    """Build a quarter-over-quarter dataset from the 5G-stock Excel workbook.

    Sheet "1" holds one quarter's data, sheet "2" the following quarter's.
    Both are joined on the stock code and the one-quarter holding return is
    derived.

    NOTE(review): currently unused (commented out in ``__main__``).
    """
    first_ji_data = pd.read_excel(r"G:\Download\股票历史数据\5G股票的分季度数据.xls", "1")
    rename_cols(first_ji_data)
    # columns that must be present after renaming
    allowed_col = ('复权价1_AdjClpr1', '市盈率_PE', '市净率_PB', '市现率_PCF', '市销率_PS', '每股收益(摊薄)_EPS', '净资产收益率(摊薄)_ROE', '每股净资产_NAPS',)
    assert set(allowed_col) == set(first_ji_data.columns.values.tolist()) & set(allowed_col), "缺少必要列的，缺少 : {}".format(set(allowed_col) - set(first_ji_data.columns.values.tolist()))
    first_ji_data.dropna(axis=0, how='any', subset=['复权价1_AdjClpr1'], inplace=True)

    # next quarter's data, used to compute the forward return
    next_ji_data = pd.read_excel(r"G:\Download\股票历史数据\5G股票的分季度数据.xls", "2")
    rename_cols(next_ji_data)
    assert set(allowed_col) == set(next_ji_data.columns.values.tolist()) & set(allowed_col), "缺少必要列的，缺少 : {}".format(set(allowed_col) - set(next_ji_data.columns.values.tolist()))
    next_ji_data.dropna(axis=0, how='any', subset=['复权价1_AdjClpr1'], inplace=True)

    # join on the stock code and derive the one-quarter holding return
    new_first_ji_data = first_ji_data.merge(right=next_ji_data, how='inner', on="股票代码_Stkcd", copy=True, suffixes=("", "_next_ji"))
    # BUGFIX: the return must be relative to the price at purchase time
    # ((next - cur) / cur); the original divided by the *next* quarter's
    # price, inconsistent with the (end - start) / start convention used
    # in main_tu_year().
    new_first_ji_data['持有一季度的收益率'] = new_first_ji_data.apply(
        lambda x: (x['复权价1_AdjClpr1_next_ji'] - x['复权价1_AdjClpr1']) / x['复权价1_AdjClpr1'], axis=1)
    assert '持有一季度的收益率' in new_first_ji_data.columns


def download_yearly_report_tushare(year=18):
    """Download the annual financial-indicator report for every code in
    ``codes`` via TuShare and dump the combined frame to an Excel file.

    Args:
        year: two-digit year; the report dated ``20{year}1231`` is fetched.

    Side effects: network calls to TuShare (throttled to ~1 req/s) and an
    Excel file written under G:\\Download\\股票历史数据.
    """
    pro_api = TUShareObject().instance()
    # TuShare field -> human-readable (Chinese) name, kept for reference
    name_map = {"revenue_ps": "每股收益", "bps": "每股净资产", "capital_rese_ps": "每股资本公积金", "op_of_gr": "营业利润率", "roa2_yearly": "资产报酬率", "roe_yearly": "净资产收益率", "roe_yoy": "净资产增长率", "assets_turn": "总资产周转率", "debt_to_assets": "资产负债率"}
    need_cols = set(name_map)

    all_datas = DataFrame()

    for code_ in codes:
        datas = pro_api.fina_indicator_vip(ts_code=code_,
                                           start_date="20{}1231".format(year), end_date='20{}1231'.format(year))
        data_cols = datas.columns.values.tolist()
        assert need_cols == need_cols & set(data_cols), "缺少列 :{}".format(need_cols - (need_cols & set(data_cols)))

        if not datas.empty:
            all_datas = concat([all_datas, datas], axis=0)
        # stay under the TuShare per-minute rate limit
        time.sleep(1)

    # NOTE: the original path had a doubled backslash inside a raw string
    # ("...\\数据\\\\{}..."); Windows tolerates it, but it is normalised here.
    all_datas.to_excel(r"G:\Download\股票历史数据\{}年财务数据.xls".format(year))


def download_price_tushare(year=18):
    """Download hfq-adjusted daily bars for every code in ``codes`` on each
    quarter date in ``dates`` and dump the combined frame to an Excel file.

    Args:
        year: two-digit year used only to name the output file.

    Side effects: network calls to TuShare (throttled to ~1 req/s) and an
    Excel file written under G:\\Download\\股票历史数据.
    """
    tus_obj = TUShareObject()
    ts = tus_obj.origin_instance()
    pro_api = tus_obj.instance()
    # TuShare field -> human-readable (Chinese) name, kept for reference
    name_map = {"ts_code": "股票代码", "trade_date": "交易日期", "close": "收盘价", }
    need_cols = set(name_map)

    all_datas = DataFrame()

    for code_ in codes:
        for date_ in dates:
            datas = ts.pro_bar(ts_code=code_, api=pro_api, adj='hfq',
                               start_date=date_, end_date=date_)
            # pro_bar can return None (e.g. on failure); skip such results.
            # (was: `if not (DataFrame == type(datas))` — isinstance is the
            # idiomatic, subclass-safe form of the same check)
            if not isinstance(datas, DataFrame):
                continue
            data_cols = datas.columns.values.tolist()
            assert need_cols == need_cols & set(data_cols), "缺少列 :{}".format(need_cols - (need_cols & set(data_cols)))

            if not datas.empty:
                all_datas = concat([all_datas, datas], axis=0)
            # stay under the TuShare per-minute rate limit
            time.sleep(1)

    all_datas.to_excel(r"G:\Download\股票历史数据\20{}-20{}季度股票价格数据1.xls".format(year, year + 1))


def discretization(tezheng_datas):
    """Scale the selected financial features, impute missing values and
    project them onto 4 principal components.

    (The name is historical: an earlier KMeans-based discretisation step was
    removed, but the 4-tuple return shape is kept for the callers.)

    Args:
        tezheng_datas: frame holding the TuShare indicator columns listed in
            ``feature_cols`` plus a 'return_rate' label column.

    Returns:
        Tuple of (feature matrix after MinMax scaling + PCA, list of
        return_rate labels, 0, 0) — the trailing zeros are placeholders for
        the removed discretisation outputs.
    """
    feature_cols = ["bps", 'op_of_gr', 'roa2_yearly', 'roe_yoy', 'profit_to_gr', 'dp_assets_to_eqt', 'ca_to_assets']

    # Remove the unit/scale differences; range [1, 2] keeps every value
    # strictly positive (the sklearn default would be [0, 1]).
    transfer = MinMaxScaler(feature_range=[1, 2])
    data_new = DataFrame(transfer.fit_transform(tezheng_datas[feature_cols]), columns=feature_cols)

    # FIXME: dropping rows with missing values would be preferable to
    # mean imputation.
    for col_name_ in data_new.columns:
        data_new[col_name_] = data_new[col_name_].fillna(data_new[col_name_].mean())

    # PCA down to 4 components
    pca2 = PCA(n_components=4)
    data_new = pca2.fit_transform(data_new)
    print(pca2.explained_variance_ratio_)    # variance share of each kept component
    print("sum pca rate: {}".format(sum(pca2.explained_variance_ratio_)))
    print(pca2.explained_variance_)

    return data_new, tezheng_datas.return_rate.ravel().tolist(), 0, 0

def filter_unkown_jidu(jidu_price_all_datas, filter_datas: DataFrame):
    """Keep only the rows of *filter_datas* whose ts_code has a price row on
    every quarter date, i.e. codes with a fully known price history.

    Args:
        jidu_price_all_datas: quarterly price frame with at least 'ts_code'
            and 'trade_date' columns.
        filter_datas: frame with a 'ts_code' column to be filtered.

    Returns:
        A filtered copy of *filter_datas* with a fresh 0..n-1 index.
    """
    dates = ['20180102', '20180402', '20180702', '20181008', '20190102', '20190401', '20190701', '20191008', '20200102']
    # hoisted: the string view of trade_date is reused for every date slice
    trade_dates = jidu_price_all_datas['trade_date'].astype(str)

    # Successively inner-join the per-date slices on ts_code: only codes
    # quoted on every single date survive all the joins.
    join_datas = jidu_price_all_datas[trade_dates == dates[0]]
    suffix = ''
    for idx_, date_ in enumerate(dates):
        # grow the suffix each round so repeated column names stay unique
        suffix += '_y'
        if idx_ < 1:
            continue
        join_datas = pd.merge(left=join_datas,
                              right=jidu_price_all_datas[trade_dates == date_],
                              how='inner', on='ts_code',
                              suffixes=('', suffix))

    # intersect with the frame to be filtered
    distinct_codes = join_datas['ts_code'].astype(str).unique().tolist()
    # .copy() makes the result independent of filter_datas, so the in-place
    # reset_index is safe even under mode.chained_assignment='raise'
    res_datas = filter_datas[filter_datas['ts_code'].astype(str).isin(distinct_codes)].copy()
    res_datas.reset_index(drop=True, inplace=True)
    return res_datas

POSTIVE_RATE = 0.5  # fraction of the return-sorted stocks labelled +1 (sic: "positive")
def main_tu_year():
    """End-to-end experiment: train a GaussianNB on 2017 annual reports
    labelled by 2018 stock returns, test it on 2018 reports labelled by 2019
    returns, then report and plot the realised returns of the
    predicted-positive stocks, broken down by stock tag.

    Side effects: reads Excel exports and a local MySQL database, prints
    classification reports, shows matplotlib figures.
    """

    # Features: per-share net assets, operating margin, ROA, net-asset growth
    # -> bps op_of_gr roa2_yearly roe_yoy
    jidu_price_datas = pd.read_excel(r"G:/Download/股票历史数据/2018-2019季度股票价格数据.xls")
    year_reports_17_datas = filter_unkown_jidu(jidu_price_datas, pd.read_excel(r"G:\Download\股票历史数据\17年财务数据.xls"))
    sql_ = """select distinct SB.ts_code as ts_code, tag from code_tag_date left outer join stockbasic AS SB ON SB.symbol = code_tag_date.code"""
    # NOTE(review): hard-coded DB credentials — move to a config/env variable
    con = pymysql.connect(host='localhost', user='root', password='h303567469', database='stock_tag_manager', charset='utf8',
                          use_unicode=True)
    stock_classic_datas = pd.read_sql(sql=  sql_, con=con)
    # dates = ['20180102', '20180402', '20180702', '20181008', '20190102', '20190401', '20190701', '20191008', '20200102']
    # keep only codes whose price exists on every quarter date

    start_2018_year_price = filter_unkown_jidu(jidu_price_datas, jidu_price_datas[
        jidu_price_datas['trade_date'].astype(str) == '20180102'])
    end_2018_year_price = filter_unkown_jidu(jidu_price_datas,
                                             jidu_price_datas[jidu_price_datas['trade_date'].astype(str) == '20190102'])

    # join the year-start and year-end price tables on ts_code
    union_year_price = pd.merge(left=end_2018_year_price, right=start_2018_year_price,
                                how='inner', on='ts_code',
                                suffixes=('_end', '_start'),
                                validate='one_to_one')

    # 2018 holding-period return: (end close - start close) / start close
    union_year_price['return_rate'] = (union_year_price['close_end'] - union_year_price['close_start']) / \
                                      union_year_price[
                                          'close_start']
    year_reports_17_datas = pd.merge(left=year_reports_17_datas, right=union_year_price[['return_rate', 'ts_code']], how='inner',
                                     on='ts_code'
                                     , validate='1:1')

    year_reports_17_datas.sort_values(by=['return_rate', ], inplace=True, ascending=False)

    # Label the top POSTIVE_RATE fraction of returns +1, the rest -1.
    # (An older comment said top 25%; POSTIVE_RATE is currently 0.5.)

    cur_cnt = 0
    for idx_, row_ in year_reports_17_datas.iterrows():
        if cur_cnt >= year_reports_17_datas.shape[0] * POSTIVE_RATE:
            year_reports_17_datas.loc[idx_, 'return_rate'] = -1
        else:
            year_reports_17_datas.loc[idx_, 'return_rate'] = 1
        cur_cnt += 1

    assert year_reports_17_datas['return_rate'].isin([-1, 1])[0], '分类后的值不正确'
    #
    # # scale + impute + PCA-project the features (see discretization())
    xt_, yt_, d3, cut_2_k = discretization(year_reports_17_datas)

    Xtrain = xt_
    Ytrain = yt_

    # fit the naive-Bayes model
    gnb = GaussianNB().fit(Xtrain, Ytrain)
    print("训练集 得分")
    assert not np.unique(gnb.predict(Xtrain)).size == 1, '出现明显错误：预测结果只有1类'
    print(classification_report(Ytrain, gnb.predict(Xtrain),labels=[1,-1]))
    # return 0
    # ========== the next year's data is used as the test set ==========
    year_reports_18_datas = filter_unkown_jidu(jidu_price_datas, pd.read_excel(r"G:\Download\股票历史数据\18年财务数据.xls"))
    # dates = ['20180102', '20180402', '20180702', '20181008', '20190102', '20190401', '20190701', '20191008', '20200102']
    # keep only codes whose price exists on every quarter date

    start_2019_year_price = filter_unkown_jidu(jidu_price_datas, jidu_price_datas[
        jidu_price_datas['trade_date'].astype(str) == '20190102'])
    end_2019_year_price = filter_unkown_jidu(jidu_price_datas,
                                             jidu_price_datas[jidu_price_datas['trade_date'].astype(str) == '20200102'])

    # join the year-start and year-end price tables on ts_code
    union_year_price = pd.merge(left=end_2019_year_price, right=start_2019_year_price,
                                how='inner', on='ts_code',
                                suffixes=('_end', '_start'),
                                validate='one_to_one')

    # 2019 holding-period return, same convention as above
    union_year_price['return_rate'] = (union_year_price['close_end'] - union_year_price['close_start']) / \
                                      union_year_price[
                                          'close_start']
    year_reports_18_datas = pd.merge(left=year_reports_18_datas, right=union_year_price[['return_rate', 'ts_code']], how='inner',
                                     on='ts_code'
                                     , validate='1:1')

    year_reports_18_datas.sort_values(by=['return_rate', ], inplace=True, ascending=False)

    # label the top POSTIVE_RATE fraction +1, the rest -1 (as for the train set)
    cur_cnt = 0
    for idx_, row_ in year_reports_18_datas.iterrows():
        if cur_cnt >= year_reports_18_datas.shape[0] * POSTIVE_RATE:
            year_reports_18_datas.loc[idx_, 'return_rate'] = -1
        else:
            year_reports_18_datas.loc[idx_, 'return_rate'] = 1
        cur_cnt += 1

    assert year_reports_18_datas['return_rate'].isin([-1, 1])[0], '分类后的值不正确'
    #
    # # scale + impute + PCA-project the test features
    xt_, yt_, d3, cut_2_k = discretization(year_reports_18_datas)

    Xtest = xt_
    Ytest = yt_

    Ytest_predict = gnb.predict(Xtest)

    # inspect prediction quality on the test set
    print("测试集 ----------------")
    print(classification_report(Ytest, gnb.predict(Xtest),labels=[1,-1]))

    # quarter-by-quarter returns of the selected stocks.  FIXME: extend to
    # per-trading-day returns?
    year_reports_18_datas['predict_type'] = Ytest_predict.reshape(-1, 1)
    year_reports_18_datas_predict_rate = year_reports_18_datas.copy(deep=True)
    year_19_start = '20190102'
    year_reports_18_datas_predict_rate = pd.merge(left=year_reports_18_datas_predict_rate,
                                                  right=jidu_price_datas[
                                                      jidu_price_datas['trade_date'].astype(str) == year_19_start][
                                                      ['ts_code', 'close']],
                                                  how='inner', on='ts_code',
                                                  validate='one_to_one')

    predict_tag_rates = {}
    # tags (sectors/themes) included in the report (sic: "allowed")
    allwoed_tag = ['金融', '医药', '军工','云经济','华为','化工','半导体','无线耳机','5G']
    for predict_date_ in predict_price_dates:
        year_reports_18_datas_predict_rate = pd.merge(left=year_reports_18_datas_predict_rate,
                                                      right= jidu_price_datas[
                                                          jidu_price_datas['trade_date'].astype(str) == predict_date_][['ts_code','close']],
                                                      how='inner', on='ts_code',
                                                      suffixes=('', '_{}'.format(predict_date_)),
                                                      validate='one_to_one')
        close_price_col_name_ = 'close_{}'.format(predict_date_)
        year_reports_18_datas_predict_rate_postive = year_reports_18_datas_predict_rate[year_reports_18_datas_predict_rate['predict_type'] ==  +1]
        purshare_reach_reate = (year_reports_18_datas_predict_rate_postive[close_price_col_name_] - year_reports_18_datas_predict_rate_postive['close'])/year_reports_18_datas_predict_rate_postive['close'] * 100
        # NOTE(review): denominator is count+1, not count — this biases the
        # mean slightly low; confirm whether intended (also below)
        one_purshare_reach_rate = purshare_reach_reate.sum()/(1+purshare_reach_reate.shape[0])
        # if False:
        for tg_ in stock_classic_datas['tag'].unique():
            if not tg_ in allwoed_tag:
                continue
            selected_scd = stock_classic_datas[stock_classic_datas['tag'] == tg_]
            selected_scd = pd.merge(left=selected_scd,
                                    right=year_reports_18_datas_predict_rate_postive,
                                    how='inner', on='ts_code',
                                    validate='one_to_one')
            selected_scd.dropna(axis=0,how='any',inplace=True)
            if selected_scd.shape[0] == 0:
                continue
            avg_sum = ((selected_scd[close_price_col_name_] - selected_scd['close'])/selected_scd['close']).sum() / (1 + selected_scd.shape[0]) * 100
            # NOTE(review): 'sum' shadows the builtin for the rest of the loop body
            sum = ((selected_scd[close_price_col_name_] - selected_scd['close'])/selected_scd['close']).sum() * 100
            print("date: {} tag : {} avg sum reach rate: {}, sum reach rate :{}".format(predict_date_, tg_, avg_sum, sum))
            print("size : {} ".format(selected_scd.shape))

            predict_tag_rates.setdefault(tg_, [])
            predict_tag_rates[tg_].append({"avg_sum":avg_sum, "sum" : sum})

        print("purshare end date: {} sum reach rate : {}".format(predict_date_, one_purshare_reach_rate))
        # year_reports_18_datas_predict_rate['close'] = year_reports_18_datas_predict_rate[close_price_col_name_]
    # if False:
    print("sum :", predict_tag_rates)
    # collect the final-quarter avg_sum / sum per tag, then rank
    avg_sum_lists = []
    sum_lists = []
    for tg_ in stock_classic_datas['tag'].unique():
        if not tg_ in predict_tag_rates or not tg_ in allwoed_tag:
            continue
        avg_sum_lists.append([tg_, predict_tag_rates[tg_][len(predict_tag_rates[tg_]) - 1]['avg_sum']])
        sum_lists.append([tg_, predict_tag_rates[tg_][len(predict_tag_rates[tg_]) - 1]['sum']])

    avg_sum_lists = sorted(avg_sum_lists, key = lambda k : k[1])
    sum_lists = sorted(sum_lists, key = lambda k : k[1])

    # (also ranked by total sum)
    print("avg_sum :{} \n sum :{}".format(avg_sum_lists, sum_lists))

    # plot the return curves — large palette so each tag gets a distinct colour
    cnames = ['aqua','beige','bisque','black','blanchedalmond','blue','blueviolet','brown','burlywood','cadetblue','chartreuse','chocolate','coral','cornflowerblue','cornsilk','crimson','cyan','darkblue','darkcyan','darkgoldenrod','darkgray','darkgreen','darkkhaki','darkmagenta','darkolivegreen','darkorange','darkorchid','darkred','darksalmon','darkseagreen','darkslateblue','darkslategray','darkturquoise','darkviolet','deeppink','deepskyblue','dimgray','dodgerblue','firebrick','floralwhite','forestgreen','fuchsia','gainsboro','ghostwhite','gold','goldenrod','gray','green','greenyellow','honeydew','hotpink','indianred','indigo','ivory','khaki','lavender','lavenderblush','lawngreen','lemonchiffon','lightblue','lightcoral','lightcyan','lightgoldenrodyellow','lightgreen','lightgray','lightpink','lightsalmon','lightseagreen','lightskyblue','lightslategray','lightsteelblue','lightyellow','lime','limegreen','linen','magenta','maroon','mediumaquamarine','mediumblue','mediumorchid','mediumpurple','mediumseagreen','mediumslateblue','mediumspringgreen','mediumturquoise','mediumvioletred','midnightblue','mintcream','mistyrose','moccasin','navajowhite','navy','oldlace','olive','olivedrab','orange','orangered','orchid','palegoldenrod','palegreen','paleturquoise','palevioletred','papayawhip','peachpuff','peru','pink','plum','powderblue','purple','red','rosybrown','royalblue','saddlebrown','salmon','sandybrown','seagreen','seashell','sienna','silver','skyblue','slateblue','slategray','snow','springgreen','steelblue','tan','teal','thistle','tomato','turquoise','violet','wheat','white','whitesmoke','yellow','yellowgreen']
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    x_data = predict_price_dates

    lines = []
    labels = []

    for idx_, tg_ in enumerate(predict_tag_rates):
        labels.append(tg_ + "_累计收益")
        y_data = [i_['avg_sum'] for i_ in predict_tag_rates[tg_]]
        plt.xticks(np.arange(len(x_data)), x_data)
        ln1, = plt.plot( y_data, color= cnames[idx_], linewidth=2.0, linestyle='-')
        lines.append(ln1)

    plt.title("分类股票收益率预测")  # chart title (CJK font configured above)
    plt.legend(handles=lines, labels=labels,)
    ax = plt.gca()
    ax.spines['right'].set_color('none')  # hide the right spine
    ax.spines['top'].set_color('none')  # hide the top spine

    plt.show()
    plot_roc(Ytest, gnb.predict_proba(Xtest)[:,1])
    pass
    # print(np.unique(Ytrain))
    # fill missing values
    # for col_name_ in ["bps", 'op_of_gr', 'roa2_yearly', 'roe_yoy']:
    #     #fixme replace with dropping the row instead
    # Xtrain[col_name_].fillna(Xtrain[col_name_].mean(), inplace=True)
    # print(classification_report(Ytrain.return_rate.ravel().tolist(), gnb.predict_proba(Xtrain).tolist(),labels=[1,-1]))
    # cluster_plot(d3,cut_2_k).show()
    # pass


if __name__ == '__main__':
    # Fail fast on chained-assignment mistakes instead of silently warning.
    pd.set_option('mode.chained_assignment', 'raise')
    # One-off download steps — run once, then keep commented out:
    # download_price_tushare(18)
    # download_price_tushare(19)
    # download_yearly_report_tushare(year = 17)
    # download_yearly_report_tushare(year = 18)
    # download_yearly_report_tushare(year = 19)
    # main()
    main_tu_year()
