import configparser
import datetime
import hashlib
import logging
import os
import pickle
import re
import time
import traceback
import urllib.parse
import warnings
from itertools import combinations

import pandas as pd
import pymysql
import redis
from sqlalchemy import create_engine, DateTime, String

# Let SQLAlchemy's default "mysql://" dialect use pymysql as the driver.
pymysql.install_as_MySQLdb()

warnings.filterwarnings('ignore')

# Logging: timestamp to the second, with file/line for easier debugging.
log_format = "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"  # precise to the second
logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=date_format)

# Read config.ini that sits next to this script (not the current working dir).
config = configparser.ConfigParser()
current_dir = os.path.dirname(os.path.abspath(__file__))
config.read(os.path.join(current_dir, 'config.ini'))

# Redis connection — used as a cache/message store for pickled DataFrames.
redis_host = config.get('Redis', 'host')
redis_port = config.getint('Redis', 'port')
redis_db = config.getint('Redis', 'db')
redis_password = config.get('Redis', 'password')
r = redis.Redis(host=redis_host, port=redis_port, db=redis_db, password=redis_password)

# MySQL connection URL; the password is URL-quoted so special characters
# (e.g. '@', '/') do not break the SQLAlchemy URL.
mysql_host = config.get('mysql', 'host')
mysql_port = config.getint('mysql', 'port')
mysql_db = config.get('mysql', 'db')
mysql_user = config.get('mysql', 'user')
mysql_password = urllib.parse.quote(config.get('mysql', 'password'))
db_url = f'mysql://{mysql_user}:{mysql_password}@{mysql_host}:{mysql_port}/{mysql_db}'

# pool_recycle=60 avoids "MySQL server has gone away" on stale pooled connections.
engine = create_engine(db_url, pool_size=20, max_overflow=20, pool_recycle=60)


def get_concept_change(query_date, stock_fupan_df):
    """Build a realtime per-concept market index and cache it in Redis.

    For every stock concept (the THS industry is merged in as extra concept
    tags) this computes: mean change, member count, mean intraday change
    (price vs. open), 10%/20% limit-up counts, RPS percentile ranks, and a
    semicolon-joined list of similar concepts (Jaccard similarity > 0.2 over
    each concept's top-10 gainers). The result DataFrame is pickled into the
    Redis key ``real_market_concept_index_tdx`` with an 8-hour TTL.

    Parameters
    ----------
    query_date : str
        Trading date as 'YYYYMMDD'. Currently unused inside the function;
        kept so existing callers keep working.
    stock_fupan_df : pandas.DataFrame
        Per-stock review data; must contain the columns '股票代码',
        '股票简称', '所属概念' and '所属同花顺行业'.
    """
    # Realtime quotes (code / price / open / change) cached by another job.
    real_stock_info_tdx = pickle.loads(r.get("real_stock_info_tdx"))

    # Treat the THS industry as just another concept tag, then split the
    # semicolon-joined concept string into a list per stock.
    stock_fupan_df = stock_fupan_df.dropna()
    stock_fupan_df['所属同花顺行业'] = stock_fupan_df['所属同花顺行业'].str.replace("-", ";")
    stock_fupan_df['所属概念'] = stock_fupan_df['所属概念'] + ";" + stock_fupan_df['所属同花顺行业']
    stock_fupan_df['所属概念'] = stock_fupan_df['所属概念'].str.split(';')

    # One row per (stock, concept); keep concepts with a sane member count
    # (too few members = noise, too many = meaningless umbrella term).
    stock_fupan_df = stock_fupan_df.explode('所属概念')
    stock_fupan_df_count = stock_fupan_df.groupby("所属概念").size().reset_index(name='count')
    stock_fupan_df_count = stock_fupan_df_count.query("count > 10 and count<500")

    # Manually curated blacklist of low-signal concepts.
    stock_fupan_df_count = stock_fupan_df_count[~stock_fupan_df_count['所属概念'].isin(["专精特新","电子","电子商务","MSCI概念","汽车零部件Ⅲ","ST板块","医保目录","中药Ⅲ","通信服务Ⅲ","雷达","激光","其他社会服务","科创次新股",
                                                                                        "证券Ⅲ","人民币贬值受益","仪器仪表Ⅲ","互联网电商Ⅲ","贸易Ⅲ","燃气Ⅲ","物流Ⅲ","电子化学品Ⅲ","小家电Ⅲ","计算机设备Ⅲ",
                                                                                        "注册制次新股","摘帽","其他电源设备","其他医疗服务","其他电子Ⅲ","其他家用轻工","其他专用设备","其他化学原料","分拆上市意愿",
                                                                                        "国开行","重组蛋白","血氧仪","非汽车交运","新冠特效药","广播电视","专用设备","成飞概念","线缆部件及其他","王者荣耀","世界杯",
                                                                                        "创投","人工智能","同花顺漂亮100","参股基金","金属制品","三沙","数字水印","物联网感知层","猴痘概念","疫苗存储","肝炎概念","综合","同花顺中特估100","仿制药一致性评价",
                                                                                        "其他建材","冬奥会","联想概念","幽门螺杆菌概念","老字号","小额贷款","壳资源","独角兽概念","智慧党建","智能建筑","抗原检测","休闲零食","共享经济","村镇银行","参股民营银行","其他白色家电","其他化学制品","其他交运设备","其他电子","其他通用设备","猴痘概念",
                                                                                        "中原经济区","IP概念","NFT概念","鄱阳湖经济区","电力改革","蓝宝石","电力物联网","参股保险","参股新三板","PPP概念","国六标准、国六排放、国六","综合Ⅲ","医药商业Ⅲ","贸易Ⅲ","教育Ⅲ","其他自动化设备","其他生物制品","造纸Ⅲ","央视财经50","其他塑料制品","其他小金属","仪器仪表Ⅲ"])]

    stock_fupan_df = stock_fupan_df[stock_fupan_df['所属概念'].isin(stock_fupan_df_count["所属概念"].values)]

    # Attach realtime quote fields to each (stock, concept) row.
    df_concept_tdx = pd.merge(stock_fupan_df, real_stock_info_tdx, left_on='股票代码', right_on="code", how='inner')

    # Limit-up flags: 20% boards (ChiNext 30x / STAR 68x) vs 10% boards
    # (main board 60x / 00x).
    mask_20cm = (df_concept_tdx['code'].str.startswith('30') | df_concept_tdx['code'].str.startswith('68')) & (
            df_concept_tdx['change'] > 19.9)
    mask_10cm = (df_concept_tdx['change'] >= 9.9) & (
            df_concept_tdx['code'].str.startswith('60') | df_concept_tdx['code'].str.startswith('00'))

    df_concept_tdx['zt_20cm'] = mask_20cm.astype(int)
    df_concept_tdx['zt_10cm'] = mask_10cm.astype(int)
    # change == -100 marks suspended / invalid quotes — drop them.
    df_concept_tdx = df_concept_tdx.query("change != -100")

    # Intraday change: current price vs. today's open, per concept mean.
    # NOTE(review): computed over ALL members, while change_mean below is
    # computed over only the top-10 gainers — presumably intentional; confirm.
    df_concept_tdx["change_hs"] = round(
        (df_concept_tdx["price"] - df_concept_tdx["open"]) * 100 / df_concept_tdx["open"], 2)
    change_hs_mean = df_concept_tdx.groupby("所属概念")["change_hs"].mean().reset_index()

    # Keep only the 10 strongest members of each concept for the stats below.
    df_concept_tdx = df_concept_tdx.groupby('所属概念').apply(lambda x: x.nlargest(10, 'change')).reset_index(drop=True)

    # Was a bare debug print(); route through the configured logger instead.
    logging.debug("top-10 members per concept:\n%s", df_concept_tdx)

    # Concept similarity: Jaccard index over each concept's top-10 members.
    concept_dict = df_concept_tdx.groupby('所属概念')['股票简称'].apply(list).to_dict()
    concepts = list(concept_dict.keys())
    similarity_matrix = pd.DataFrame(index=concepts, columns=concepts, dtype=float)

    for (concept1, stocks1), (concept2, stocks2) in combinations(concept_dict.items(), 2):
        set1 = set(stocks1)
        set2 = set(stocks2)
        intersection = len(set1 & set2)
        union = len(set1 | set2)
        similarity = intersection / union if union != 0 else 0

        # The matrix is symmetric; fill both halves.
        similarity_matrix.at[concept1, concept2] = similarity
        similarity_matrix.at[concept2, concept1] = similarity

    for concept in concepts:
        similarity_matrix.at[concept, concept] = 1.0

    # Matrix -> long form, keep pairs with similarity > 0.2, strongest first.
    df_long = similarity_matrix.reset_index().melt(id_vars='index', var_name='相似概念', value_name='相似度')
    df_long = df_long.rename(columns={'index': '所属概念'})
    df_long.dropna(inplace=True)
    df_long = df_long[df_long['相似度'] > 0.2]
    df_long.sort_values(by="相似度", ascending=False, inplace=True)

    # One row per concept with its similar concepts ';'-joined.
    df_long = df_long.groupby('所属概念')['相似概念'].apply(lambda x: ';'.join(x)).reset_index()
    change_mean = df_concept_tdx.groupby("所属概念")["change"].mean().reset_index()

    zt_count_10cm = df_concept_tdx[df_concept_tdx['zt_10cm'] == 1].groupby("所属概念").size().reset_index(
        name='zt_10cm')

    zt_count_20cm = df_concept_tdx[df_concept_tdx['zt_20cm'] == 1].groupby("所属概念").size().reset_index(
        name='zt_20cm')

    # Merge all per-concept statistics into one table.
    result = pd.merge(change_mean, stock_fupan_df_count, on="所属概念")
    result = pd.merge(result, change_hs_mean, on="所属概念")
    result = pd.merge(result, zt_count_10cm, on="所属概念", how="left")
    result = pd.merge(result, zt_count_20cm, on="所属概念", how="left")
    result = pd.merge(result, df_long, on="所属概念", how="left")

    # Concepts with no limit-ups come out of the left joins as NaN.
    result['zt_10cm'] = result['zt_10cm'].fillna(0).astype(int)
    result['zt_20cm'] = result['zt_20cm'].fillna(0).astype(int)

    # RPS percentile rank (1..100) by mean change.
    result = result.sort_values(by='change', ascending=True)
    result.reset_index(drop=True, inplace=True)
    result['change_rps'] = result.index + 1
    result['change_rps'] = round(result['change_rps'] / len(result) * 100, 2)

    # RPS percentile rank by mean intraday change.
    result = result.sort_values(by='change_hs', ascending=True)
    result.reset_index(drop=True, inplace=True)
    result['change_hs_rps'] = result.index + 1
    result['change_hs_rps'] = round(result['change_hs_rps'] / len(result) * 100, 2)
    result = result.rename(columns={"所属概念": "concept_name"})

    result = result.round(2)
    result["timestamp"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Publish to Redis with an 8-hour TTL.
    r.set("real_market_concept_index_tdx", pickle.dumps(result))
    r.expire("real_market_concept_index_tdx", 3600 * 8)
    logging.info(f"插入概念数据{len(result)}条")

    # MySQL sink intentionally disabled; re-enable to persist history:
    # result.to_sql("real_market_concept_index_tdx", engine, if_exists='append', index=False,
    #               dtype={'timestamp': DateTime()})


if __name__ == "__main__":
    query_date = datetime.datetime.now().strftime('%Y%m%d')
    # Guard against a missing key: r.get() returns None when today's snapshot
    # has not been written yet, and pickle.loads(None) would raise a
    # confusing TypeError. Fail with a clear log message instead.
    raw = r.get(f"stock_panqian:{query_date}")
    if raw is None:
        logging.error("Redis key stock_panqian:%s not found; nothing to do", query_date)
    else:
        get_concept_change(query_date, pickle.loads(raw))