import time,re
import pandas as pd
import redis
import pickle
import logging
import hashlib
import configparser
import datetime
from sqlalchemy import create_engine, DateTime, String
import pymysql
import traceback
import warnings
from itertools import combinations
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np

# Let SQLAlchemy's "mysql://" dialect use pymysql as the MySQLdb driver.
pymysql.install_as_MySQLdb()

warnings.filterwarnings('ignore')
# Shared binarizer used by get_concept_change() to build the concept/stock
# membership matrix for similarity computation.
mlb = MultiLabelBinarizer()

log_format = "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"  # second-level precision for log timestamps
logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=date_format)

# Initialise the config parser.
config = configparser.ConfigParser()

import os
# Resolve config.ini relative to this file so the script works regardless of
# the current working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
config.read(current_dir+'/config.ini')


# Redis connection settings (read from the [Redis] section of config.ini).
redis_host = config.get('Redis', 'host')
# redis_host = "192.168.249.10"

redis_port = config.getint('Redis', 'port')
redis_db = config.getint('Redis', 'db')
redis_password = config.get('Redis', 'password')
# Module-level Redis client shared by get_concept_change() and __main__.
r = redis.Redis(host=redis_host, port=redis_port, db=redis_db, password=redis_password)

# MySQL connection settings (read from the [mysql] section of config.ini).
mysql_port = config.getint('mysql', 'port')
mysql_host = config.get('mysql', 'host')
mysql_db = config.get('mysql', 'db')
import urllib.parse
# URL-quote the password so special characters don't break the DSN.
mysql_password = urllib.parse.quote(config.get('mysql', 'password'))
mysql_user = config.get('mysql', 'user')
db_url = f'mysql://{mysql_user}:{mysql_password}@{mysql_host}:{mysql_port}/{mysql_db}'

# pool_recycle=60 drops idle connections after a minute to avoid MySQL
# "server has gone away" errors in this long-running daemon.
engine = create_engine(db_url,pool_size=20,max_overflow=20,pool_recycle=60)


def get_concept_change(query_date,stock_fupan_df):
    """Compute a real-time per-concept market snapshot and persist it.

    Joins the pre-market stock/concept table with live quotes from Redis,
    aggregates per-concept statistics (mean change, intraday change, limit-up
    counts, Jaccard similarity between concepts, RPS percentile ranks) and
    writes the result both to Redis ("real_market_concept_index_tdx") and to
    the MySQL table of the same name.

    Parameters:
        query_date: trading date string 'YYYYMMDD'.
            NOTE(review): currently unused inside this function.
        stock_fupan_df: DataFrame with at least the columns
            '股票代码' (stock code), '股票简称' (stock name),
            '所属概念' (';'-separated concepts) and
            '所属同花顺行业' ('-'-separated industry path).

    Returns:
        None. Side effects only (Redis SET/EXPIRE and SQL append).
    """

    # Skip entirely outside trading hours; the flag is maintained elsewhere.
    if r.get("is_trade_time") ==b"NO":
        return


    # Live quote snapshot: one row per stock with at least
    # 'code', 'change', 'price', 'open' columns (produced by another process).
    # NOTE(review): pickle.loads on a Redis value — trusted internal data only.
    real_stock_info_tdx = pickle.loads(r.get("real_stock_info_tdx"))

    stock_fupan_df = stock_fupan_df.dropna()
    # Normalise the industry path separator to ';' and fold the industry
    # levels into the concept list so industries are ranked like concepts.
    stock_fupan_df['所属同花顺行业'] = stock_fupan_df['所属同花顺行业'].str.replace("-", ";")
    stock_fupan_df['所属概念'] = stock_fupan_df['所属概念'] + ";" + stock_fupan_df['所属同花顺行业']
    stock_fupan_df['所属概念'] = stock_fupan_df['所属概念'].str.split(';')


    # One row per (stock, concept) pair.
    stock_fupan_df = stock_fupan_df.explode('所属概念')
    # Keep only concepts with a meaningful member count: >10 (statistically
    # relevant) and <500 (not a catch-all bucket).
    stock_fupan_df_count = stock_fupan_df.groupby("所属概念").size().reset_index(name='count')
    stock_fupan_df_count =stock_fupan_df_count.query("count > 10 and count<500")

    # Hand-maintained blocklist of noisy / stale / overly-broad concepts.
    stock_fupan_df_count = stock_fupan_df_count[~stock_fupan_df_count['所属概念'].isin(
        [
            "专精特新", "电子", "电子商务", "MSCI概念", "汽车零部件Ⅲ", "ST板块", "医保目录", "中药Ⅲ", "通信服务Ⅲ",
            "雷达",
            "激光", "其他社会服务", "科创次新股", "一元股", "养老金持股", "兽药", "军民融合", "农垦改革", "冬虫夏草",
            "冷链物流", "动物保健", "化工合成材料",
            "证券Ⅲ", "人民币贬值受益", "仪器仪表Ⅲ", "互联网电商Ⅲ", "贸易Ⅲ", "燃气Ⅲ", "物流Ⅲ", "电子化学品Ⅲ", "小家电Ⅲ",
            "阿尔茨海默概念",
            "计算机设备Ⅲ", "其他金属新材料", "鞋帽及其他", "北交所概念", "区块链", "区块链底层", "医疗改革", "医药安全",
            "南沙新区", "单抗",
            "注册制次新股", "摘帽", "其他电源设备", "其他医疗服务", "其他电子Ⅲ", "其他家用轻工", "其他专用设备",
            "非金属材料Ⅲ", "其他食品",
            "其他化学原料", "分拆上市意愿", "专业工程", "专业服务", "优先股概念", "全屋定制", "其他农产品加工",
            "其他橡胶制品", "其他纺织", "其他通信设备",
            "国开行", "重组蛋白", "血氧仪", "非汽车交运", "新冠特效药", "广播电视", "专用设备", "成飞概念",
            "埃博拉概念",
            "facebook概念", "郭台铭概念",
            "线缆部件及其他", "王者荣耀", "世界杯", "参股万达商业", "反恐", "商汤科技概念", "国家科技大会", "国开行",
            "城商行", "乙肝治疗", "电梯概念",
            "创投", "同花顺漂亮100", "参股基金", "金属制品", "三沙", "数字水印", "物联网感知层", "猴痘概念", "疫苗存储",
            "人造太阳", "美团概念股",
            "肝炎概念", "综合", "同花顺中特估100", "仿制药一致性评价", "多肽药", "大豆", "奢侈品", "宽带中国",
            "富士康概念",
            "富媒体", "蚂蚁金服概念",
            "其他建材", "冬奥会", "联想概念", "幽门螺杆菌概念", "老字号", "小额贷款", "壳资源", "独角兽概念",
            "智慧党建",
            "二手车", "谷歌概念",
            "智能建筑", "抗原检测", "休闲零食", "共享经济", "村镇银行", "参股民营银行", "其他白色家电", "其他化学制品",
            "京东概念", "滴滴概念股",
            "其他交运设备", "其他电子", "其他通用设备", "猴痘概念", "平安资管持股", "平面媒体", "征信", "快手概念",
            "微信概念", "快递", "厄尔尼诺概念",
            "中原经济区", "IP概念", "NFT概念", "鄱阳湖经济区", "电力改革", "蓝宝石", "电力物联网", "参股保险",
            "其他通信设备", "儿童医药医疗",
            "参股新三板", "PPP概念", "国六标准、国六排放、国六", "综合Ⅲ", "医药商业Ⅲ", "贸易Ⅲ", "教育Ⅲ", "其他自动化设备",
            "特钢概念", "拼多多概念",
            "个护用品", "风电设备", "铜冶炼", "软饮料", "超级品牌", "超清视频", "贵金属Ⅲ", "共同富裕示范区", "干细胞",
            "态势感知", "恒大概念", "抖音小店", "抖音概念",
            "抗肿瘤", "指纹技术", "抗艾滋病", "抗病毒面料", "新疆建设兵团", "普钢", "无线充电", "智慧灯杆", "辉瑞概念",
            "足球概念", "调味发酵品", "语音技术", "证金持股",
            "计算机", "西尼罗病毒", "草地贪夜蛾防治", "航运", "网络电视", "网络视频", "网络切片", "线型", "磨具磨料",
            "登革热", "疫苗", "特色小镇", "特种玻璃", "SAAS",
            "VR平台", "WIN升级", "网络直播", "肝素", "腾讯概念", "膜材料", "自然景点", "药品信息化追溯", "高校",
            "F5G概念",
            "B转H", "电动汽车", "VPN", "互联网银行", "保障房",
            "制冷空调设备", "可燃冰", "新能源物流车", "新能源整车", "新能源发电", "林场改革", "杭州亚运会", "饰品",
            "长三角一体化", "遥感技术", "通信", "迪士尼", "透明工厂", "进口博览会",
            "西部开发", "被动元件", "股权转让", "胎压监测", "棚户区改造", "服装", "水产品", "华为欧拉", "化妆品",
            "分立器件", "净水概念", "农机", "农村电商", "兰新白试验区", "胰岛素", "海洋经济",
            "参股银行", "参股券商", "原料药", "合成树脂", "噪声防治", "固废治理", "基站射频", "增强现实", "多模态AI",
            "大连自贸区", "天津自贸区", "太赫兹", "宁德时代概念", "射频器", "工业互联网",
            "工程建材", "广告营销", "建筑涂料", "影视院线", "机床工具", "机械装备", "杭州湾大湾区", "毛发医疗",
            "非金属材料", "阿里巴巴概念", "长安汽车概念", "重庆自贸区", "通用设备", "PVC", "TMT",
            "裸眼3D", "脱硫脱硝", "精装修", "社区团购", "纺织化学用品", "租售同权", "移动支付", "两江新区", "两会",
            "锂电隔膜", "电池", "电子信息", "电子化学品", "牙科医疗", "海洋经济", "海绵城市", "养老概念", "乡村振兴",
            "油气运输仓储", "东北亚经贸中心", "地面兵装", "增强现实", "锂电制造", "锂电原料", "雄安新区", "铝材加工",
            "金改", "贸易", "触摸屏", "统一大市场", "中航系", "互联网彩票", "互联网电商", "互联网券商", "互联网保险",
            "人脸识别", "俄乌冲突概念", "免税店", "公路铁路运输", "公路建设", "新冠疫苗", "有机硅", "智能表",
            "民营医院", "民爆用品", "江苏国企改革", "汽车制造概念", "油品升级", "油品改革",
            "油价上调", "河南国企改革", "浙江国企改革", "浦东前滩", "海南旅游岛", "涂料油墨", "消费电子", "消费金融",
            "消防装备", "港口", "湖北国企改革", "湖南国企改革", "炭黑", "猪瘟疫情", "环保包装", "环球主题公园",
            "现代服务业", "珠海国企改革", "甘肃国企改革",
            "生态农业", "生物安全", "电子书", "金融信息服务", "金融IC", "融资租赁", "云南国企改革", "北京国企改革",
            "吉林国企改革", "四川国企改革", "天津国企改革", "安徽国企改革", "山东国企改革", "山西国企改革",
            "广东国企改革", "辽宁国企改革", "一季报预增", "三农",
            "三星", "PM2.5", "传感器", "有线电视网络", "食品及饲料添加剂", "其他生物制品", "造纸Ⅲ", "央视财经50",
            "其他塑料制品", "其他小金属", "仪器仪表Ⅲ", "智慧城市","美丽中国",
            "云计算","比亚迪概念","百度概念","元宇宙","高股息精选"
        ])]

    # Keep only (stock, concept) rows whose concept survived the filters.
    stock_fupan_df = stock_fupan_df[stock_fupan_df['所属概念'].isin(stock_fupan_df_count["所属概念"].values)]


    # Attach live quote fields to each (stock, concept) row.
    df_concept_tdx = pd.merge(stock_fupan_df, real_stock_info_tdx, left_on='股票代码',right_on="code", how='inner')


    # Limit-up flags: 20% boards (ChiNext '30', STAR '68') vs 10% boards
    # (main board '60'/'00'). Thresholds 19.9 / 9.9 tolerate rounding.
    mask_20cm = (df_concept_tdx['code'].str.startswith('30') | df_concept_tdx['code'].str.startswith('68')) & (
            df_concept_tdx['change'] > 19.9)
    mask_10cm = (df_concept_tdx['change'] >= 9.9) & (
            df_concept_tdx['code'].str.startswith('60') | df_concept_tdx['code'].str.startswith('00'))

    df_concept_tdx['zt_20cm'] = mask_20cm.astype(int)
    df_concept_tdx['zt_10cm'] = mask_10cm.astype(int)
    # change == -100 marks suspended / invalid quotes; drop them.
    df_concept_tdx = df_concept_tdx.query("change != -100")

    # Mean change over ALL members of each concept (before top-10 trimming).
    avr_change_mean = df_concept_tdx.groupby("所属概念")["change"].mean().reset_index()
    avr_change_mean = avr_change_mean.rename(columns={'change': 'avr_change'})

    # From here on, each concept is represented by its 10 strongest stocks.
    df_concept_tdx = df_concept_tdx.groupby('所属概念').apply(lambda x: x.nlargest(10, 'change')).reset_index(drop=True)


    # Pairwise concept similarity: Jaccard index over shared top-10 member
    # stocks, computed via a binary membership matrix.
    concept_dict = df_concept_tdx.groupby('所属概念')['股票简称'].apply(list).to_dict()
    binary_matrix = mlb.fit_transform(concept_dict.values())
    concepts = list(concept_dict.keys())
    intersection = np.dot(binary_matrix, binary_matrix.T)
    row_sums = intersection.diagonal()
    union = row_sums[:, None] + row_sums - intersection
    similarity_matrix = intersection / union
    similarity_df = pd.DataFrame(similarity_matrix, index=concepts, columns=concepts)
    # Wide matrix -> long (concept, similar concept, similarity) rows.
    df_long = similarity_df.reset_index().melt(id_vars='index', var_name='相似概念', value_name='相似度')

    # Rename columns.
    df_long = df_long.rename(columns={'index': '所属概念'})
    df_long.dropna(inplace=True)
    # Keep only pairs with similarity above 0.2.
    # NOTE(review): the diagonal (self-similarity == 1.0) also passes this
    # filter, so each concept lists itself among its similar concepts.
    df_long = df_long[df_long['相似度'] > 0.2]
    df_long.sort_values(by="相似度", ascending=False, inplace=True)


    # Collapse to one ';'-joined string of similar concepts per concept,
    # ordered most-similar first (sort above is preserved within groups).
    df_long = df_long.groupby('所属概念')['相似概念'].apply(lambda x: ';'.join(x)).reset_index()



    # group_size = df_concept_tdx.groupby("所属概念").size().reset_index(name='count')
    # Mean change of the top-10 members per concept.
    change_mean = df_concept_tdx.groupby("所属概念")["change"].mean().reset_index()

    # Intraday change: (last price - open) / open, per stock, then averaged.
    df_concept_tdx["change_hs"] = round(
        (df_concept_tdx["price"] - df_concept_tdx["open"]) * 100 / df_concept_tdx["open"], 2)
    change_hs_mean = df_concept_tdx.groupby("所属概念")["change_hs"].mean().reset_index()

    # Limit-up counts per concept, computed over the top-10 subset.
    zt_count_10cm = df_concept_tdx[df_concept_tdx['zt_10cm'] == 1].groupby("所属概念").size().reset_index(
        name='zt_10cm')

    zt_count_20cm = df_concept_tdx[df_concept_tdx['zt_20cm'] == 1].groupby("所属概念").size().reset_index(
        name='zt_20cm')
    # Merge all per-concept statistics into one frame.
    result = pd.merge(change_mean, stock_fupan_df_count, on="所属概念")
    # result = pd.merge(change_mean, group_size, on="所属概念")
    result = pd.merge(result, change_hs_mean, on="所属概念")
    result = pd.merge(result, zt_count_10cm, on="所属概念", how="left")
    result = pd.merge(result, zt_count_20cm, on="所属概念", how="left")
    result = pd.merge(result, df_long, on="所属概念", how="left")
    result = pd.merge(result, avr_change_mean, on="所属概念", how="left")
    result['zt_10cm'] = result['zt_10cm'].fillna(0).astype(int)  # fill NaN and cast to int
    result['zt_20cm'] = result['zt_20cm'].fillna(0).astype(int)  # fill NaN and cast to int


    # result = result.query("count > 10 and count<500")

    # RPS percentile rank by mean change: rank 1..N ascending, scaled to
    # 0-100, so the strongest concept gets change_rps == 100.
    result = result.sort_values(by='change', ascending=True)
    result.reset_index(drop=True, inplace=True)
    result['change_rps'] = result.index + 1
    result['change_rps'] = round(result['change_rps'] / len(result) * 100, 2)

    # Same percentile rank for the intraday (open-to-price) change.
    result = result.sort_values(by='change_hs', ascending=True)
    result.reset_index(drop=True, inplace=True)
    result['change_hs_rps'] = result.index + 1
    result['change_hs_rps'] = round(result['change_hs_rps'] / len(result) * 100, 2)
    result = result.rename(columns={"所属概念": "concept_name"})

    result = result.round(2)
    result["timestamp"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Publish the snapshot: Redis copy expires after one trading day (8h)...
    r.set("real_market_concept_index_tdx", pickle.dumps(result))
    r.expire("real_market_concept_index_tdx", 3600*8)
    logging.info(f"插入概念数据{len(result)}条")

    # ...and a row-per-concept history append into MySQL.
    result.to_sql("real_market_concept_index_tdx", engine, if_exists='append', index=False,
                  dtype={'timestamp': DateTime()})


if __name__ == "__main__":
    # Load today's pre-market stock/concept snapshot from Redis once at startup.
    # NOTE(review): r.get(...) returns None if the key is missing, in which case
    # pickle.loads raises TypeError before the retry loop starts — confirm the
    # "stock_panqian:<date>" key is always populated before launching this daemon.
    query_date = datetime.datetime.now().strftime('%Y%m%d')
    stock_fupan_df = pickle.loads(r.get(f"stock_panqian:{query_date}"))
    while True:
        try:
            start_time = time.time()
            get_concept_change(query_date, stock_fupan_df)
            elapsed_time = time.time() - start_time
            # Target roughly one refresh per second. Without this sleep the
            # loop busy-spins, hammering Redis/MySQL and appending rows to the
            # result table as fast as the pipeline can run (the original
            # computed sleep_time but left the sleep commented out).
            time.sleep(max(1 - elapsed_time, 0))
        except Exception as e:
            # Log at ERROR level with the full traceback (logging.exception
            # captures it automatically) and back off briefly so a persistent
            # failure doesn't spin the loop; keep running so a transient
            # Redis/MySQL hiccup doesn't kill the daemon.
            logging.exception("An error occurred: %s", e)
            time.sleep(1)