import time,re
import pandas as pd
import redis
import pickle
import logging
import hashlib
import configparser
import datetime
from sqlalchemy import create_engine, DateTime, String
import pymysql
import requests
import os
import pywencai
import akshare as ak
import pandas as pd
from datetime import datetime,timedelta
import traceback
# Register PyMySQL as the "MySQLdb" driver so SQLAlchemy's plain
# "mysql://" dialect (used for `db_url` below) works without mysqlclient.
pymysql.install_as_MySQLdb()

# Put the Node.js binary directory on PATH for child processes (pywencai
# shells out to node).  NOTE: '$PATH' is NOT expanded by an os.environ
# assignment — the original literal string clobbered the whole PATH —
# so prepend to the real current value instead.
os.environ['PATH'] = '/home/stock/node/bin:' + os.environ.get('PATH', '')

# ---- Logging: console output via basicConfig, mirrored into a shared file ----
log_format = "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"  # timestamps down to the second
logging.basicConfig(format=log_format, datefmt=date_format, level=logging.DEBUG)

# Application log file lives one directory above this script.
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../application.log')

# Attach a file handler to the root logger so every record is also persisted,
# using the same layout as the console output.
_file_formatter = logging.Formatter(log_format, date_format)
file_handler = logging.FileHandler(log_file_path)
file_handler.setFormatter(_file_formatter)
logging.getLogger().addHandler(file_handler)

# ---- Configuration: read config.ini from this script's parent directory ----
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.join(current_dir, os.pardir)
config_path = os.path.normpath(os.path.join(parent_dir, 'config.ini'))

config = configparser.ConfigParser()
config.read(config_path, encoding='utf-8')


# ---- Redis client (cache of pickled DataFrames produced by other jobs) ----
redis_host = config.get('Redis', 'host')
redis_port = config.getint('Redis', 'port')
redis_db = config.getint('Redis', 'db')
redis_password = config.get('Redis', 'password')
r = redis.Redis(
    host=redis_host,
    port=redis_port,
    db=redis_db,
    password=redis_password,
)

import urllib.parse

# ---- MySQL / SQLAlchemy engine ----
mysql_port = config.getint('mysql', 'port')
mysql_host = config.get('mysql', 'host')
mysql_db = config.get('mysql', 'db')
mysql_user = config.get('mysql', 'user')
# Percent-encode the password so special characters cannot corrupt the URL.
# safe='' matters: quote()'s default is safe='/', which leaves '/' bare and
# would break the URL for any password containing a slash.
mysql_password = urllib.parse.quote(config.get('mysql', 'password'), safe='')
db_url = f'mysql://{mysql_user}:{mysql_password}@{mysql_host}:{mysql_port}/{mysql_db}'

# Pooled engine; recycle connections after 60s of idleness to dodge
# MySQL's "server has gone away" on stale connections.
engine = create_engine(db_url, pool_size=20, max_overflow=20, pool_recycle=60)

# ---- Ad-hoc analysis: overlap between two concept boards on one trade date ----
query_date = '20240424'

# NOTE(review): pickle.loads on Redis payloads is only acceptable because this
# Redis instance is populated by our own jobs — never unpickle untrusted data.
_fupan_raw = r.get(f"stock_panqian:{query_date}")
if _fupan_raw is None:
    # Fail with a clear message instead of pickle.loads(None) -> TypeError.
    raise RuntimeError(f"no cached stock_panqian data for {query_date}")
stock_fupan_df = pickle.loads(_fupan_raw)

_tdx_raw = r.get("real_stock_info_tdx")
if _tdx_raw is None:
    raise RuntimeError("no cached real_stock_info_tdx data")
real_stock_info_tdx = pickle.loads(_tdx_raw)

v1 = "CPO"
v2 = "铜缆高速连接"
# Substring match (regex=False); na=False so rows with a missing 所属概念
# yield False instead of NaN, which would break the boolean mask.
stock_concept_df1 = stock_fupan_df[stock_fupan_df['所属概念'].str.contains(v1, regex=False, na=False)]
stock_concept_df2 = stock_fupan_df[stock_fupan_df['所属概念'].str.contains(v2, regex=False, na=False)]

# Inner join on stock code: stocks belonging to BOTH concept boards.
merged_df = pd.merge(stock_concept_df1, stock_concept_df2, on='股票代码')
# Number of overlapping stocks.
overlap_count = len(merged_df)

print(stock_concept_df1)
print(stock_concept_df2)
print(overlap_count)