import os
import re
import sys

sys.path.extend(["F:/ML/BNC", "D:/BNC", "/www/wwwroot/BNC"])
import time
from functools import lru_cache
from TuShare.TUShareObject import TUShareObject
from DataBase import DatabaseConsts
import pymysql
import datetime
from sqlalchemy import create_engine

# --- Database connection settings ---
# NOTE(review): credentials are hard-coded in source; consider moving them to
# environment variables or a config file.
host = '127.0.0.1'
port = 3306
db = DatabaseConsts.StockOneDayKLineDB
user = 'root'
password = 'h303567469'

# Sentinel bounds used as "effectively infinite" limits throughout the module.
MAX_NUM = sys.maxsize - 100
MIN_NUM = -(MAX_NUM - 10)

# --- Table names used by the SQL templates below ---
CODE_TAG_WEIGHT_TB = 'code_tag_weight'
DALIY_TAG_FRE_TB = 'daily_tag_fre'
STOCK_TAG_RELATION_ORIGIN = 'stocktagrelation_origin'
CODE_TAG_DATE = 'code_tag_date'
TAR_RENAME_TABLE_NAME = 'tag_rename'
# One daily K-line table per trading day: stock_one_day_YYYYMMDD.
stock_one_day_table_format = 'stock_one_day_{}'

# Minimum daily frequency for a forum tag to count as "broken out".
TAG_FRE_FORUM_LINE = 7
# Number of top tags taken for the daily calendar.
TAG_CALENDER_LINE = 20
TAG_FRE_MAX_NUM = MAX_NUM  # upper cut-off when filtering tags by frequency
TAG_FRE_MIN_NUM = 9        # lower cut-off when filtering tags by frequency
DEFAULT_WEIGHT = 1         # initial weight for every (code, tag) pair
MYSQL_CORE_NORMALIZED_SQL = '1'  # SQL fragment used as the normalization factor

DAILY_UP_THRESHOLD = 0.05  # a stock counts as "up" when the daily gain is >= 5%
MORE_INFO = '直接使用复盘数据算出权重信息'

K = 20
# Slice applied to the sorted list of available trading days (keeps the most
# recent ~499 days).
date_slice_start = -(500 - 1)
date_slice_end = MAX_NUM

# Module-level MySQL connection/cursor shared by every function below.
connection = pymysql.connect(host=host,
                             port=port,
                             user=user,
                             password=password,
                             db=db,
                             charset='utf8')

cursor = connection.cursor()
# SQLAlchemy engine used by DataFrame.to_sql in init_stock_basic_info().
engine = create_engine(str(r"mysql+pymysql://%s:" + '%s' + "@%s/%s?charset=utf8") % (user, password, host, db))


def dateRange(start, end, step=1, format="%Y%m%d"):
    """Return date strings from *start* (inclusive) to *end* (exclusive).

    Both endpoints are strings in *format*; consecutive results are *step*
    days apart.
    """
    begin = datetime.datetime.strptime(start, format)
    total_days = (datetime.datetime.strptime(end, format) - begin).days
    result = []
    for offset in range(0, total_days, step):
        result.append((begin + datetime.timedelta(offset)).strftime(format))
    return result


@lru_cache(maxsize=5000)
def code_ass_labels(code):
    """Return every tag associated with *code* (results cached per code)."""
    sql = 'select tag from {} where code = %s'.format(
        DatabaseConsts.AStockTagRelationTable)
    cursor.execute(sql, [code])
    return [row[0] for row in cursor.fetchall()]


def normal_one_if_not_exists_weight(cur_max_time):
    """Reset every (code, tag) weight to DEFAULT_WEIGHT.

    Truncates the weight table, then re-inserts one row per distinct
    (code, tag) pair found in the latest daily K-line table joined against
    stocktagrelation.  Only meant to run once, at the start of a single
    weight-calculation pass.
    """
    truncate_sql = 'TRUNCATE {}'.format(CODE_TAG_WEIGHT_TB)
    reset_sql = (
        'INSERT INTO code_tag_weight (CODE, tag, weight) '
        'SELECT DISTINCT A.`CODE`, B.tag, {} FROM {} AS A '
        'INNER JOIN stocktagrelation AS B ON A.`Code` = B. CODE'
    ).format(DEFAULT_WEIGHT, stock_one_day_table_format.format(cur_max_time))

    for statement in (truncate_sql, reset_sql):
        cursor.execute(statement)
        connection.commit()


def get_daliy_tags(dayli_res_all, UNREACHABLE_WEIGHT) -> set:
    """Return the set of tags that "broke out" on a given day.

    Looks at the top ``TAG_CALENDER_LINE`` rows of *dayli_res_all* (each a
    mapping with ``'tag'`` and ``'weight'`` keys, assumed sorted by weight)
    and collects every tag whose weight differs from *UNREACHABLE_WEIGHT*,
    the sentinel value used to disable a tag.
    """
    tags = set()
    for one_res in dayli_res_all[:TAG_CALENDER_LINE]:
        # `!=` instead of the original `not ... ==` — same truth table,
        # clearer intent.
        if one_res['weight'] != UNREACHABLE_WEIGHT:
            tags.add(one_res['tag'])

    return tags


def insert_code_weight(code_labels_weight):
    """Replace the weight table's contents with *code_labels_weight*.

    *code_labels_weight* is a sequence of (code, tag, weight) tuples.
    """
    table = DatabaseConsts.CodeTagWeightTableName

    cursor.execute("TRUNCATE {};".format(table))
    connection.commit()

    cursor.executemany(
        "insert into {} (code,tag,weight) values(%s,%s,%s)".format(table),
        code_labels_weight)
    connection.commit()


# Maps tag -> earliest date (from code_tag_date) on which the tag appeared.
tag_eailies_appear_time = {}


def init_tag_ea_calendar():
    """Populate ``tag_eailies_appear_time`` from the code_tag_date table."""
    sql = "select MIN(date),tag from {} group by tag".format(CODE_TAG_DATE)
    cursor.execute(sql)
    for earliest_date, tag in cursor.fetchall():
        tag_eailies_appear_time[tag] = earliest_date
    print("初始化标签出现的最早时间完成")


def init_stock_basic_info():
    """Fetch the stock basic-info list from TuShare and store it in MySQL.

    Replaces the whole stock-basic table.  Errors are printed and swallowed —
    this is a deliberate best-effort refresh.
    """
    pro_api = TUShareObject.instance()
    try:
        frame = pro_api.stock_basic(exchange='', list_status='L',
                                    fields='ts_code,symbol,name,list_date,delist_date')
        frame.to_sql(DatabaseConsts.StockBasicTableName, con=engine,
                     if_exists='replace', index=False)
    except Exception as e:
        print(e)


def generate_tag_daliy_fre(daterange):
    """Rebuild the daily tag-frequency table over *daterange*.

    For every consecutive (previous day, current day) pair of daily K-line
    tables, counts the tagged stocks present in both and stores one frequency
    row per tag for the current day.  Improves accuracy of later scoring by
    providing each tag's baseline daily frequency.
    """
    cursor.execute("TRUNCATE {};".format(DALIY_TAG_FRE_TB))
    connection.commit()

    # Walk consecutive day pairs; the first day has no predecessor and is
    # skipped implicitly by the pairing.
    for pre_date, cur_date in zip(daterange, daterange[1:]):
        sql = "INSERT into {}(tag,fre,date)".format(DALIY_TAG_FRE_TB) + \
              "(SELECT STRO.tag, count(*) fre,{} from {} AS B INNER JOIN {} as A on A.`Code` = B.`Code` " \
              "INNER JOIN stocktagrelation AS STRO ON STRO.code = A.`Code` GROUP BY STRO.tag ORDER BY fre desc )".format(
                  cur_date,
                  stock_one_day_table_format.format(cur_date),
                  stock_one_day_table_format.format(pre_date),
              )
        cursor.execute(sql)
        connection.commit()


def get_all_code_from_base():
    """Return every distinct ts_code from the stock-basic table."""
    cursor.execute("select distinct(ts_code) from {}".format(
        DatabaseConsts.StockBasicTableName))
    rows = cursor.fetchall()
    return [code for (code,) in rows]


@lru_cache(maxsize=6000)
def get_code_start_time(code):
    """Return the listing date of *code* as an int, or None when unknown."""
    sql = "select list_date from {} where ts_code = %s".format(
        DatabaseConsts.StockBasicTableName)
    cursor.execute(sql, [code])
    row = cursor.fetchone()
    if row and row[0]:
        return int(row[0])
    return None


@lru_cache(maxsize=6000)
def get_code_end_time(code):
    """Return the delisting date of *code* as an int.

    Stocks that are still listed (no delist_date) get the MAX_NUM sentinel so
    range comparisons treat them as never ending.
    """
    sql = "select delist_date from {} where ts_code = %s".format(
        DatabaseConsts.StockBasicTableName)
    cursor.execute(sql, [code])
    row = cursor.fetchone()
    if row and row[0]:
        return int(row[0])
    return MAX_NUM


# def write_to_log(content, params, rounds):
#     # 如果没有进行初始化就初始化
#     global cur_group
#     if cur_group == -1:
#         # 获取最大的group并加一
#         select_sql = 'select IFNULL(max(run_group),0) +1 from {}'.format(RUN_DATA_TB)
#         curr.execute(select_sql)
#         cur_group = curr.fetchone()[0]
#
#     _dat = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
#     # 装配 时间，
#     insert_sql = "insert into {} (data, run_date,round, more_info,run_group) values(%s,%s,%s,%s,%s)".format(RUN_DATA_TB)
#
#     curr.execute(insert_sql, [content, _dat, rounds, params, cur_group])
#     connection.commit()

def not_like(col, key_words):
    """Build a SQL fragment ``<col> not like '%w%' and ...`` for *key_words*.

    The trailing ``and`` is dropped but a trailing space is kept, matching
    how callers splice the fragment into larger statements.  Returns an empty
    string for empty *key_words*.
    """
    clauses = ["{} not like '%{}%' ".format(col, word) for word in key_words]
    return "and ".join(clauses)


def like(col, key_words):
    """Build a SQL fragment ``<col>  like '%w%' and ...`` for *key_words*.

    Counterpart of :func:`not_like`; the trailing ``and`` is dropped while
    the trailing space is kept.  (The double space after *col* reproduces the
    original template exactly.)
    """
    clauses = ["{}  like '%{}%' ".format(col, word) for word in key_words]
    return "and ".join(clauses)


def in_sql(key_words):
    """Render *key_words* as a SQL IN-list literal.

    A leading empty string is always included so the result is valid SQL even
    when *key_words* is empty, e.g. ``in_sql(['a'])`` -> ``"('', 'a')"``.

    Fixed: the original ``str(list).replace('[', '(').replace(']', ')')``
    also rewrote bracket characters *inside* a keyword; slicing off only the
    outer list brackets leaves keyword content intact.
    """
    items = ['']
    items.extend(key_words)
    return "(" + str(items)[1:-1] + ")"


api_call_count_default = -1
api_call_count = api_call_count_default
logs_path = '/logs/'
cur_log_save_path = None


def write_to_log(content, params):
    """Append *content* to the current run's log file.

    On the first call, resolves the log directory under the current working
    directory, creates it, scans existing ``<count>-...`` log files to pick
    the next run counter, and builds this run's file name from the counter,
    the current timestamp, and *params* (filesystem-unsafe characters are
    replaced with a backtick).  Subsequent calls append to the same file;
    *params* is ignored after the first call.
    """
    global logs_path, api_call_count, cur_log_save_path
    if api_call_count == api_call_count_default:
        # BUG FIX: os.path.join discards the cwd when the second component is
        # absolute ('/logs/'), which sent logs to the filesystem root.  Strip
        # leading separators so the directory lands under the cwd as intended.
        logs_path = os.path.join(os.getcwd(), logs_path.lstrip('/\\'))

        os.makedirs(logs_path, exist_ok=True)

        # File name format: "<count>-<date>-<params>"; continue numbering
        # from the largest counter already on disk.
        _file_max_api_call_count = 0
        for log in os.listdir(logs_path):
            if len(log.split('-')) > 1:
                facc = log.split('-')[0]
                try:
                    _file_max_api_call_count = max(_file_max_api_call_count, int(facc))
                except ValueError:
                    # Ignore files whose prefix is not a run counter.
                    continue

        _file_max_api_call_count += 1
        api_call_count = _file_max_api_call_count

        cur_log_save_path = os.path.join(
            logs_path,
            "{}-{}-{} .log".format(
                api_call_count,
                re.sub(r'[\\/:*?"<>|.]', '`', str(datetime.datetime.today())),
                re.sub(r'[\\/:*?"<>|.]', '`', str(params))))

    with open(cur_log_save_path, 'a', encoding='utf8') as f:
        f.write(content)


# Forum tags excluded outright when reading the daily forum data.
forum_stop_word = ['涨价', '次新', '首批']
# Forum tags excluded by substring match (via not_like()).
forum_stop_like_word = ['低价', '季报', '转让']
# Tags blocked from the calendar / weight computations entirely.
stop_tag = ['标普道琼斯A股', '转融券标的', '融资融券', '富时罗素概念股', '新股与次新股', '沪股通', '深股通', 'MSCI预期', 'MSCI概念', '央企国资改革',
            '地方国资改革', 'ST板块', '人民币贬值受益', '壳资源', '参股保险', '参股券商', '参股民营银行'
            ]
# If a stock carries any of these tags, the whole stock is blocked.
__code_stop_tag = ['ST板块']
__stop_tag_like = ['%国资改革%']
# Frequency-based removal: tags whose overall frequency falls in
# (-inf, TAG_FRE_MIN_NUM] or [TAG_FRE_MAX_NUM, +inf) are also blocked.

# Executed at import time: extends stop_tag with too-rare / too-common tags
# and with tags matching the LIKE patterns above.
select_top_freq_tag_sql = "select tag,count(*) fre from stocktagrelation  GROUP BY tag HAVING (fre <= %s or fre >= %s) or {} order BY fre DESC" \
    .format(like('tag', __stop_tag_like))
cursor.execute(select_top_freq_tag_sql, [TAG_FRE_MIN_NUM, TAG_FRE_MAX_NUM])
stop_tag.extend([r[0] for r in cursor.fetchall()])

stop_codes = ()
# FIXME: re-enable the per-code blocking below when needed
# select_stop_codes_sql = "select code from stocktagrelation where tag in {}"
# cursor.execute(select_stop_codes_sql.format(in_sql(__code_stop_tag)))
# res = cursor.fetchall()
# stop_codes = tuple([r[0] for r in res])

# Compares each day's computed tag scores against the known reference data.
# Scoring formula: tag_contribution = sum(up * weight) / today_tag_fre
select_dayli_top_sql_format_all_first = " SELECT B.date, STR.tag, ( SUM( (B.ClosePrice - A.ClosePrice)" + \
                                        " / A.ClosePrice * IFNULL(CTW.weight, 0) ) * 1)*{}  " \
                                            .format(MYSQL_CORE_NORMALIZED_SQL) + \
                                        " AS weight FROM {} AS B INNER JOIN {} AS A ON A.`Code` = B.`Code` " \
                                        " INNER JOIN stocktagrelation AS STR ON B.`Code` = STR.`code` " \
                                        " INNER JOIN code_tag_weight as CTW ON CTW.code = B.`Code` AND CTW.tag = STR.tag  " + \
                                        " INNER JOIN {} AS DTFT ON DTFT.tag = STR.tag".format(DALIY_TAG_FRE_TB) + \
                                        " AND DTFT.date = {} " + \
                                        " WHERE (B.ClosePrice - A.ClosePrice)/ A.ClosePrice >= {} and A.`Code` not in {}" \
                                            .format(DAILY_UP_THRESHOLD, in_sql(stop_codes)) + \
                                        "  AND STR.tag NOT IN {} GROUP BY B.Date,STR.tag ORDER BY weight DESC" \
                                            .format(in_sql(stop_tag))

# Computes the stock<->tag association strength: up * weight1 -> weight2.
select_one_code_one_tag_weight = "SELECT CTW. CODE, CTW.tag, sum( ( (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) * IFNULL(CTW.weight, 0) )  AS weight " \
                                 " FROM {} AS B INNER JOIN {} AS A ON A.`Code` = B.`Code` INNER JOIN stocktagrelation STR ON STR.`code` = A.`Code` " \
                                 + " AND A.`Code` NOT IN {} AND STR.tag NOT IN {} INNER JOIN code_tag_weight AS CTW ON CTW. CODE = A. CODE AND STR.tag = CTW.tag " \
                                     .format(in_sql(stop_codes), in_sql(stop_tag)) + \
                                 " WHERE ( (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) >= {} GROUP BY CTW. CODE, CTW.tag ORDER BY tag ,weight DESC" \
                                     .format(DAILY_UP_THRESHOLD)

# Earlier normalized variant of the query above, kept for reference:
# select_one_code_one_tag_weight = "SELECT CTW. CODE, CTW.tag, sum( ( (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) * IFNULL(CTW.weight, 0) )  AS weight " \
#                                  "FROM {} AS B INNER JOIN {} AS A ON A.`Code` = B.`Code` INNER JOIN stocktagrelation STR ON STR.`code` = A.`Code` " \
#                                  +"AND A.`Code` NOT IN {} AND STR.tag NOT IN {} INNER JOIN code_tag_weight AS CTW ON CTW. CODE = A. CODE AND STR.tag = CTW.tag ".format(in_sql(stop_codes), in_sql(stop_tag))+ \
#                                   "INNER JOIN ( SELECT CTW.tag, SUM( (  (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) * IFNULL(CTW.weight, 0) ) AS weight " \
#                                    +"FROM {} AS B INNER JOIN {} AS A ON A.`Code` = B.`Code` INNER JOIN stocktagrelation STR ON STR.`code` = A.`Code` " \
#                                    +"AND A.`Code` NOT IN {} AND STR.tag NOT IN {}".format(in_sql(stop_codes), in_sql(stop_tag))+ \
#                                     " INNER JOIN code_tag_weight AS CTW ON CTW. CODE = A. CODE AND STR.tag = CTW.tag AND ( (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) >= 0 GROUP BY CTW.tag ) ALL_SUM ON ALL_SUM.tag = CTW.tag WHERE ( (B.ClosePrice - A.ClosePrice) / A.ClosePrice ) >= 0 AND ALL_SUM.weight IS NOT NULL AND ALL_SUM.weight != 0 GROUP BY CTW. CODE, CTW.tag ORDER BY tag ,weight DESC"

# Daily forum tag frequencies with stop words filtered; formatted with a
# 'YYYY-MM-DD' date string.
select_forum_top_sql_format = "select tag, count(tag) as fre from code_tag_date where date = '{}'" + \
                              " and ( {} and tag not in {} ) GROUP BY tag ORDER BY  fre DESC " \
                                  .format(not_like('tag', forum_stop_like_word), in_sql(forum_stop_word))
# Lists every stock_one_day_* table present in the K-line database.
tables_sql = "select table_name from information_schema.tables where table_schema='{}' and table_type='base table' and table_name LIKE 'stock_one_day_%';" \
    .format(DatabaseConsts.StockOneDayKLineDB)


def draw_bar(tags_daliy_socre_sequence):
    """Plot score distributions for a fixed pair of tags.

    *tags_daliy_socre_sequence* maps tag -> list of entries with 'date' and
    'score' keys.  Draws one histogram (seaborn distplot) per tag in
    ``show_tags`` and prints the mean / median / mode of each score list.

    NOTE(review): uses the module-level ``import numpy as np`` that appears
    *after* this function in the file — fine at call time, but fragile.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    # Tags hard-coded for this exploratory plot.
    show_tags = ['华为概念', '无线耳机']
    fig = plt.figure()
    for idx, tag in enumerate(show_tags):

        date_list = []
        score_list = []
        for o_day_score in tags_daliy_socre_sequence[tag]:
            date_list.append(float(o_day_score['date']))
            score_list.append(float(o_day_score['score']))

        # plt.figure(tag)
        # plt.subplot(121 + idx, axisbg=color)
        # plt.title(idx)

        # One stacked subplot per tag (211, 212, ...).
        ax1 = fig.add_subplot(211 + idx)
        ax1.set_title(tag)
        # i+=1
        # plt.bar(range(len(date_list)), score_list, color='rgb', tick_label=date_list)
        print("tag score mean:{} ; median : {} ; many_num:{}".format(np.mean(score_list), np.median(score_list),
                                                                     np.argmax(np.bincount(score_list))))
        sns.distplot(score_list, bins=20, rug=True, ax=ax1, kde=False)

        # plt.hist(score_list, bins=40, histtype="stepfilled", alpha=2)
        # sns.distplot(score_list, rug=True, label = tag)
    plt.show()


import numpy as np


def over_over_caclu_calendar(date_rang, times):
    """Second-pass tag breakout-calendar computation.

    The first pass (elsewhere) uses the reference data to find each day's
    broken-out tags and derive code-tag weights; this routine re-runs the
    daily scoring with those weights and writes each day's top tags next to
    the forum reference data into the run log for comparison.  The final
    evaluation function is still undecided.

    NOTE(review): ``date_range_int`` below is initialized empty, so the inner
    loops currently do no work — the date-range wiring looks unfinished;
    confirm before relying on this function (the ``date_rang`` parameter is
    never read).
    """
    shift_args = [1]
    __MYSQL_CORE_NORMALIZED_SQL_format = ''
    __select_dayli_top_sql_format_all_first = " SELECT B.date, STR.tag, ( SUM( (B.ClosePrice - A.ClosePrice)" + \
                                              " / A.ClosePrice * IFNULL(CTW.weight, 0) ) * 1)*{}  " \
                                                  .format(__MYSQL_CORE_NORMALIZED_SQL_format) + \
                                              " AS weight FROM {} AS B INNER JOIN {} AS A ON A.`Code` = B.`Code` " \
                                              " INNER JOIN stocktagrelation AS STR ON B.`Code` = STR.`code` " \
                                              " INNER JOIN code_tag_weight as CTW ON CTW.code = B.`Code` AND CTW.tag = STR.tag  " + \
                                              " INNER JOIN {} AS DTFT ON DTFT.tag = STR.tag".format(DALIY_TAG_FRE_TB) + \
                                              " AND DTFT.date = {} " + \
                                              " WHERE (B.ClosePrice - A.ClosePrice)/ A.ClosePrice >= {} and A.`Code` not in {}" \
                                                  .format(DAILY_UP_THRESHOLD, in_sql(stop_codes)) + \
                                              "  AND STR.tag NOT IN {} GROUP BY B.Date,STR.tag ORDER BY weight DESC" \
                                                  .format(in_sql(stop_tag))
    _date_rang_start = ''
    _date_rang_end = ''
    logger_file_pattern = "{} ,tag fre limit :{} ,db normal func : {}  , rounds :{} , daily up threshold%:{} tag cut line:{}"

    date_range_int = []
    for shift_arg__ in shift_args:
        all_dates_daliy_res_temp = dict()
        # Per-tag score formula: uw = up * weight

        for index, cur_date in enumerate(date_range_int):
            if index == 0:
                continue

            pre_date = date_range_int[index - 1]

            # BUG FIX: time.clock() was removed in Python 3.8; use
            # time.perf_counter() for elapsed-time measurement.
            stt = time.perf_counter()
            # todo: recompute once the evaluation metric is settled
            SQL = __select_dayli_top_sql_format_all_first.format(
                shift_arg__, stock_one_day_table_format.format(cur_date),
                stock_one_day_table_format.format(pre_date), cur_date
            )
            print("SQL:{}".format(SQL))
            cursor.execute(SQL)
            daliy_res_all = cursor.fetchall()
            all_dates_daliy_res_temp[cur_date] = daliy_res_all
            # At this point each day's scores are available.
            print("select dayli sql execute time :{}".format(time.perf_counter() - stt))
            # Tags with insufficient frequency are cut below; the final round
            # is left uncut.

        # Rank, cut, and write each day's results to the log.
        for one_day_date_ in all_dates_daliy_res_temp.keys():
            daliy_res = all_dates_daliy_res_temp[one_day_date_]

            new_daliy_res = list()
            for one_day_res in daliy_res:
                _tag = one_day_res[1]
                _scr = one_day_res[2]
                __weight = _scr
                new_daliy_res.append(
                    {'tag': _tag,
                     'weight': __weight})
            # Sort by recomputed weight, highest first.
            new_daliy_res = sorted(new_daliy_res, key=lambda x: -x['weight'])

            def get_daliy_res(daliy_res) -> list:
                """Keep only the top entries (cut line currently fixed at 20)."""
                __twice_spilt_line = 20
                return daliy_res[:__twice_spilt_line]

            new_daliy_res = get_daliy_res(new_daliy_res)

            # Forum reference data for the same day (date as 'YYYY-MM-DD').
            _cur_date_s = str(one_day_date_)
            cursor.execute(
                select_forum_top_sql_format.format(
                    "{}-{}-{}".format(_cur_date_s[:4], _cur_date_s[4:6], _cur_date_s[6:])))
            forum_dayli_res = cursor.fetchall()

            content = "--------------------- {} rounds-----------------------" \
                      "day : {} \n 结果:{}\n" \
                      "参考结果 :{}\n\n\n\n" \
                .format(times, one_day_date_, new_daliy_res, forum_dayli_res[:15])
            write_to_log(content,
                         logger_file_pattern.format(
                             MORE_INFO,
                             TAG_FRE_MAX_NUM,
                             MYSQL_CORE_NORMALIZED_SQL,
                             times,
                             DAILY_UP_THRESHOLD,
                             TAG_CALENDER_LINE
                         ))


def get_exist_day():
    """Return the names of all existing stock_one_day_* tables as row tuples."""
    cursor.execute(tables_sql)
    return cursor.fetchall()


def first_calendar_cal(daily_tag_calendar: dict):
    """First pass: mark the tags that "broke out" each day from forum data.

    Fills *daily_tag_calendar* in place with {date: [tag, ...]} for every
    renamed forum tag whose daily frequency reaches TAG_FRE_FORUM_LINE.
    Derived from the review-data relation code -> tag -> weight.
    """

    def get_broke_tag_from_forum() -> dict:
        """Return {date: [tag, ...]} for tags above the frequency cut-off."""
        _select_cur_up_tag_from_forum = "select date ,TR.to as tag_r, count(*) as fre from {} AS CTD INNER JOIN {} AS TR ON TR.from = CTD.tag group by date,TR.to".format(
            CODE_TAG_DATE, TAR_RENAME_TABLE_NAME)
        cursor.execute(_select_cur_up_tag_from_forum)
        # BUG FIX: the original stored a single {'fre','tag'} dict per date,
        # overwriting earlier tags of the same day, and the nested iteration
        # below then indexed ints/strings as dicts (TypeError).  Store one
        # record per (date, tag) instead.
        reform_date_tag_calendar = dict()
        for _date, _tag, _fre in cursor.fetchall():
            reform_date_tag_calendar.setdefault(_date, dict())[_tag] = {
                'fre': _fre, 'tag': _tag}

        # fixme: replace the fixed threshold with a size-based or smarter cut
        def cut_tag_weight(reform_date_tag_calendar):
            """Keep only tags whose frequency reaches TAG_FRE_FORUM_LINE."""
            broke_tag = dict()
            for _date, tag_map in reform_date_tag_calendar.items():
                for item in tag_map.values():
                    if item['fre'] >= TAG_FRE_FORUM_LINE:
                        broke_tag.setdefault(_date, list()).append(item['tag'])
            return broke_tag

        return cut_tag_weight(reform_date_tag_calendar=reform_date_tag_calendar)

    dt_calendar_f_froum = get_broke_tag_from_forum()
    # BUG FIX: iterating a dict yields keys (strings), so the original
    # [0]/[1] indexing took characters of the date string.  Use items().
    for _date, tags in dt_calendar_f_froum.items():
        daily_tag_calendar[_date] = tags


#
# def first_calendar_cal(daily_tag_calendar : dict):
#     """ 窃取每天tag  > 6 的标签作为"""
#     def get_broke_tag_from_forum() -> dict:
#         """ 返回值为 {'date1':{'tags':['', '']}}"""
#         # 需要求出差集
#         _select_cur_up_tag_from_forum = "select date ,TR.to as tag_r, count(*) as fre from {} AS CTD INNER JOIN {} AS TR ON TR.from = CTD.tag group by date,TR.to".format(
#             CODE_TAG_DATE, TAR_RENAME_TABLE_NAME)
#         cursor.execute(_select_cur_up_tag_from_forum)
#         reform_date_tag_calendar = dict()
#         for r in cursor.fetchall():
#             _date = r[0]
#             _tag = r[1]
#             _fre = r[2]
#             if _date not in reform_date_tag_calendar.keys():
#                 reform_date_tag_calendar[_date] = dict()
#             _td = reform_date_tag_calendar[_date]
#             _td['fre'] = _fre
#             _td['tag'] = _tag
#
#         # fixme 指定大小或者其他方式实现最终的切割
#         def cut_tag_weight(reform_date_tag_calendar):
#             broke_tag = dict()
#             for _date_tf_item in reform_date_tag_calendar.items():
#                 fre_tag_i = _date_tf_item[1]
#                 for t_f_item in fre_tag_i.items():
#                     it = t_f_item[1]
#                     if it['fre'] >= TAG_FRE_FORUM_LINE:
#                         if _date_tf_item[0] not in broke_tag:
#                             broke_tag[_date_tf_item[0]] = list()
#                         broke_tag[_date_tf_item[0]].append(it['tag'])
#             return broke_tag
#
#         return cut_tag_weight(reform_date_tag_calendar=reform_date_tag_calendar)
#
#     dt_calendar_f_froum = get_broke_tag_from_forum()
#     for dt_calendar_f_item in dt_calendar_f_froum:
#         daily_tag_calendar[dt_calendar_f_item[0]] = dt_calendar_f_item[1]


def get_forum_calendar(date_range_int: list):
    """Collect the forum tags that appear >= 6 times on any day in range."""
    tags_set = set()
    for day in date_range_int:

        print("parse : {}".format(day))
        # The forum table keys dates as 'YYYY-MM-DD'; reformat the int date.
        day_s = str(day)
        cursor.execute(
            select_forum_top_sql_format.format(
                "{}-{}-{}".format(day_s[:4], day_s[4:6], day_s[6:])))
        forum_rows = cursor.fetchall()

        # Keep only tags whose daily frequency reaches 6.
        tags_set.update(row[0] for row in forum_rows if row[1] >= 6)
    return tags_set


def main_4_print_and_cal_weight():
    """Main driver: compute per-(code, tag) weights from the review data.

    Steps:
      1. Collect the available stock_one_day_* dates and slice the range.
      2. Reset all weights to DEFAULT_WEIGHT and load each tag's first-seen
         date.
      3. For every stock code and each of its tags, count the trading days on
         which the pair was "active" in range and sum the up-counts recorded
         in code_tag_date; weight = up_sum / active_days.
      4. Persist the weights via insert_code_weight().
    """
    date_range_int = [int(tn[-8:]) for tn in [tn[0] for tn in get_exist_day()]]
    date_range_int = sorted(date_range_int)

    # Drop the earliest day — it has no previous day to diff against.
    date_range_int = date_range_int[date_slice_start: date_slice_end]
    # fixme: revisit the slice bounds
    st = (min(date_range_int[1:]))
    et = (max(date_range_int[1:]))

    times = 0
    init_only_once = False
    max_convergence_times = 1
    # {tag: [{date: 20191101, score: 1390.0}, ...], ...}
    tags_daliy_socre_sequence = dict()

    # return get_forum_calendar(date_range_int)
    while times <= max_convergence_times:
        if not init_only_once:
            normal_one_if_not_exists_weight(et)
            init_tag_ea_calendar()
            # generate_tag_daliy_fre(date_range_int)
            init_only_once = True

        daily_tag_calendar = dict()

        assert times <= 1, "times不应该超过1"
        if 0 == times:
            # First pass: derive the day's broken-out tags from forum data.
            # first_calendar_cal(daily_tag_calendar)
            pass
        elif 1 == times:
            # Second pass: recompute with the learned weights.
            # over_over_caclu_calendar(date_rang=date_range_int, times = times)
            times += 1
            # End the loop.
            break

        # Skip the final round.
        if times == max_convergence_times:
            break

        # Compute the weights.
        rstd = ''
        retd = ''
        tag_std = ''
        tag_etd = MAX_NUM  # sentinel upper bound for a tag's last-seen date
        cur_etd = int(datetime.date.today().strftime("%Y%m%d"))

        all_codes = get_all_code_from_base()
        labels_associated_with_code = []
        code_labels_weight = list()

        # Per code and tag, count how often the pair could appear in history.
        code_labels_weight_temp = dict()  # {code: {tag: [up_sum, active_days]}}
        for code in all_codes:

            labels_associated_with_code = code_ass_labels(code)
            code_tag_start_time = get_code_start_time(code)
            code_tag_end_time = get_code_end_time(code)

            for label in labels_associated_with_code:
                occur_times = 0

                if label not in tag_eailies_appear_time:
                    tag_std = MAX_NUM
                else:
                    tag_std = tag_eailies_appear_time[label]
                # Effective window = overlap of tag lifetime, code listing
                # period, and the analysed date range.
                rstd = max(tag_std, code_tag_start_time, st)
                retd = min(tag_etd, code_tag_end_time, cur_etd, et)
                if not ((st <= rstd <= et) and (st <= retd <= et)):
                    continue

                # Requires date_range_int to be sorted (it is, see above).
                occur_times = date_range_int.index(retd) - date_range_int.index(rstd) + 1
                # Guarantee a non-negative window size.
                assert occur_times >= 0, "时间周期小于0 开始时间：{} 结束时间：{} 范围大小：{}".format(rstd, retd, occur_times)

                code_labels_weight_temp.setdefault(code, dict())
                code_labels_weight_temp[code].setdefault(label, dict())
                code_labels_weight_temp[code][label] = [0, occur_times]

            # BUG FIX: time.clock() was removed in Python 3.8; use
            # time.perf_counter().
            start = time.perf_counter()
            # NOTE(review): this query does not depend on `code`, so it is
            # re-run and re-accumulated once per code — every pair's up_sum
            # is scaled by len(all_codes).  Relative weights are unaffected;
            # confirm before hoisting it out of the loop.
            _code_tag_weight_sql = "select distinct code, tag,  count(1) , date from {} where date >= %s and date <= %s GROUP BY date,code,tag ".format(CODE_TAG_DATE)
            cursor.execute(_code_tag_weight_sql, [st, et])
            res = cursor.fetchall()

            for r in res:
                __code = r[0]
                __tag = r[1]
                __up_weight = r[2]
                # Pairs filtered out above (outside the active window) have no
                # entry; skip them instead of raising KeyError.
                if __code not in code_labels_weight_temp or __tag not in code_labels_weight_temp[__code]:
                    continue
                weight_times = code_labels_weight_temp[__code][__tag]
                weight_times[0] += __up_weight
                code_labels_weight_temp[__code][__tag] = weight_times
                # BUG FIX: the original format string had one placeholder but
                # two arguments, silently dropping the elapsed time.
                print("parse date :{} cost:{}".format(r[3], time.perf_counter() - start))

        # weight = up_sum / active_days; pairs with zero active days are skipped.
        for code_ in code_labels_weight_temp.keys():
            for tag_ in code_labels_weight_temp[code_].keys():
                one_ctw = code_labels_weight_temp[code_][tag_]
                if one_ctw[1] == 0:
                    continue
                code_labels_weight.append((code_, tag_, one_ctw[0] / one_ctw[1],))

        insert_code_weight(code_labels_weight)
        times += 1


def get_tag_rename_dict(words_set):
    """Map non-standard (forum) tags in *words_set* to canonical THS tags.

    Uses difflib similarity against the distinct tags of the origin relation
    table.  Returns {unnormalized_tag: {'n': best_match, 's': ratio}}.
    Exact matches and "<tag>概念" suffix matches are treated as already
    standard; over-long tags and tags containing noise words are dropped up
    front.  NOTE(review): mutates *words_set* in place via set subtraction.
    """
    import difflib
    ths_sql = "select distinct(tag) from {}".format(STOCK_TAG_RELATION_ORIGIN)
    # forum_sql = "select distinct(tag) from {} where LENGTH(tag) <= 24 ".format(CODE_TAG_DATE)
    cursor.execute(ths_sql)

    # All canonical tags from TongHuaShun.
    tag_from_ths = set([r[0] for r in cursor.fetchall()])

    # cursor.execute(forum_sql)
    # The non-standard tags to normalize.
    tag_from_unnormal = words_set
    # Pre-filter: drop over-long tags and tags containing noise words.
    need_remove_tag = set()
    contain_word = ('收购', '年报', '季报', '月报', '亏顺', '之一', '子公司', '上市', '超跌')
    for ftu__ in tag_from_unnormal:
        if not len(ftu__) <= 8:
            need_remove_tag.add(ftu__)
            continue
        for cw in contain_word:
            # BUG FIX: the original used ``ftu__.find(cw) > 0`` which missed
            # a noise word at position 0; use substring membership instead.
            if cw in ftu__ or len(ftu__) > 6:
                need_remove_tag.add(ftu__)
                break
    tag_from_unnormal -= need_remove_tag

    # Build the mapping: {unnormalized: {'n': canonical, 's': score}}.
    map_word_2_normal = dict()
    # Words that could not be matched at all.
    uncertainty_word = set()
    # Words that are already canonical (or trivially canonical via 概念).
    right_same_word = set()

    for tfu in tag_from_unnormal:
        parsed_unnomraled = False
        for tft in tag_from_ths:
            ratio = difflib.SequenceMatcher(None, tft, tfu).ratio()
            # "<tag>概念" counts as the canonical form of <tag>.
            if tft == tfu + "概念":
                right_same_word.add(tft)
                # map_word_2_normal[tfu] = {'n': tft, 's': MAX_NUM}
                parsed_unnomraled = True
                continue
            elif ratio == 1:
                right_same_word.add(tfu)
                parsed_unnomraled = True
            elif ratio > 0.5:
                # Keep the best-scoring candidate seen so far.
                map_word_2_normal.setdefault(tfu, {'n': '', 's': -MAX_NUM})
                if map_word_2_normal[tfu]['s'] < ratio:
                    map_word_2_normal[tfu] = {'n': tft, 's': ratio}
                    print(" tfu : {} --> tft : {} ,  ratio : {}".format(tfu, tft, ratio))
                parsed_unnomraled = True
        if not parsed_unnomraled:
            uncertainty_word.add(tfu)

    # Remove tags that turned out to be canonical from the mapping.
    mwn_keys = list(map_word_2_normal.keys())
    for mwn_k in mwn_keys:
        if mwn_k in right_same_word:
            del (map_word_2_normal[mwn_k])

    uncertainty_word = tag_from_unnormal - set([r for r in map_word_2_normal]) - right_same_word

    out_put_word_map = dict()
    for map_word_2_normal_item_ in map_word_2_normal.items():
        if map_word_2_normal_item_[1]['s'] != -MAX_NUM:
            out_put_word_map[map_word_2_normal_item_[0]] = map_word_2_normal_item_[1]['n']
        else:
            uncertainty_word.add(map_word_2_normal_item_[0])

    # Summary of the three outcome buckets.
    print("right_word : {} \n map_dic : {} \n uncertainty_word : {}".format(right_same_word, out_put_word_map,
                                                                            uncertainty_word))
    return map_word_2_normal


if __name__ == '__main__':
    # One-off bootstrap: refresh the stock basic-info table before first run.
    # init_stock_basic_info()
    main_4_print_and_cal_weight()
    # Scratch checks of difflib similarity ratios used by get_tag_rename_dict:
    # import difflib
    # print(difflib.SequenceMatcher(None, '华为概念', '华为').ratio())
    # print(difflib.SequenceMatcher(None, '医药', '中医药').ratio())
    # print(difflib.SequenceMatcher(None, '金改', '金融改革').ratio())
