from concurrent.futures import  ThreadPoolExecutor
from mootdx.quotes import Quotes
from mootdx import consts
import pandas as pd
import time
import logging
from multiprocessing import Process
import redis
import configparser
import pickle
import datetime  as dt
# from stock_info_collector import StockInfoCollector
from sqlalchemy import create_engine, DateTime, String
import pymysql
import time
from send_email import sendMessage
import akshare as ak

# Route SQLAlchemy's "mysql://" dialect through PyMySQL (pure-Python driver).
pymysql.install_as_MySQLdb()


# Log format: timestamp, level, file:line, message.
log_format = "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"  # timestamp precise to the second
logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=date_format)

# Initialize the configuration parser
config = configparser.ConfigParser()

# Read config.ini from the directory this script lives in
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
config.read(current_dir+'/config.ini', encoding='utf-8')


# Redis connection settings
redis_host = config.get('Redis', 'host')
# redis_host = "192.168.249.10"

redis_port = config.getint('Redis', 'port')
redis_db = config.getint('Redis', 'db')
redis_password = config.get('Redis', 'password')
# Shared Redis client used by the polling workers below.
r = redis.Redis(host=redis_host, port=redis_port, db=redis_db, password=redis_password)
# MySQL connection settings
mysql_port = config.getint('mysql', 'port')
mysql_host = config.get('mysql', 'host')
mysql_db = config.get('mysql', 'db')
import urllib.parse
# URL-encode the password so special characters don't break the DSN below.
mysql_password = urllib.parse.quote(config.get('mysql', 'password'))
mysql_user = config.get('mysql', 'user')
db_url = f'mysql://{mysql_user}:{mysql_password}@{mysql_host}:{mysql_port}/{mysql_db}'
# Pooled engine; recycle connections after 60s to avoid server-side idle timeouts.
engine = create_engine(db_url,pool_size=20,max_overflow=20,pool_recycle=60)


def timeit(func):
    """Decorator that logs the wall-clock time of each call to *func*.

    The wrapped function's return value is passed through unchanged.
    """
    import functools

    # functools.wraps preserves func.__name__/__doc__ on the wrapper,
    # so logging and introspection report the real function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        logging.info(f"Elapsed time: {end_time - start_time} seconds")
        return result
    return wrapper

        
# @timeit
# @timeit
def __get_real(client, stock_list, serial_number):
    """Fetch real-time quotes for *stock_list* in batches of 80 and cache the
    combined DataFrame to Redis under ``real_stock_info:{serial_number}``.

    :param client: TDX quotes client (``Quotes.factory(...)``).
    :param stock_list: list of 6-digit stock code strings.
    :param serial_number: worker id used in the Redis key and log lines.
    :return: list of codes that individually returned no data (err_code).
    """
    results = []
    err_code = []
    batch_size = 80
    for i in range(0, len(stock_list), batch_size):
        # Slicing past the end of a list is safe, so no special-casing of the
        # final (shorter) batch is needed.
        current_batch = stock_list[i:i + batch_size]

        try:
            result = client.quotes(symbol=current_batch)
            if len(result) == 0:
                # The whole batch came back empty: probe each symbol of this
                # batch individually to identify the bad codes.  (Probing only
                # the failed batch avoids re-checking symbols from batches
                # that already succeeded.)
                logging.error(current_batch)
                for v in current_batch:
                    res = client.quotes(symbol=v)
                    if len(res) == 0:
                        err_code.append(v)
                        logging.info(v)

            results.append(result)
        except Exception as e:
            logging.error(e)
            sendMessage(f"获取通达信实时数据错误，请检查,{serial_number}进程发生故障")
            continue

    if not results:
        # Every batch failed: pd.concat([]) would raise ValueError, so skip
        # the cache update for this round and just report the bad codes.
        return err_code

    rdf = pd.concat(results)
    rdf = rdf.drop_duplicates()

    logging.info(f"进程{serial_number}获取实时数据：{len(rdf)}条")
    r.set(f"real_stock_info:{serial_number}", pickle.dumps(rdf))
    # r.expire(f"real_stock_info:{serial_number}", 3600*24)
    return err_code


def get_real(stock_list, serial_number):
    """Poll real-time quotes for *stock_list* roughly once per second, forever.

    Sleeps while the Redis flag ``is_trade_time`` is ``b"NO"`` (outside trade
    hours).  Codes that repeatedly return no data are dropped from the list so
    they stop slowing down subsequent rounds.  Errors are logged and the loop
    retries; this function never returns.

    :param stock_list: list of 6-digit stock code strings for this worker.
    :param serial_number: worker id (string) forwarded to __get_real.
    """
    while True:
        try:
            if r.get("is_trade_time") == b"NO":
                time.sleep(1)
                continue

            # A fresh client every round avoids reusing a stale TDX connection.
            client = Quotes.factory(market='std', multithread=True, timeout=0.6)
            start_time = time.time()
            err_code = __get_real(client, stock_list, serial_number)
            if len(err_code) > 0:
                logging.info("有错误代码")
                logging.info(err_code)
                # Use a set for O(1) membership tests while filtering.
                bad = set(err_code)
                stock_list = [stock for stock in stock_list if stock not in bad]

            end_time = time.time()
            elapsed_time = round(end_time - start_time, 2)
            # Pace the loop to about one poll per second.
            sleep_time = max(1 - elapsed_time, 0)
            time.sleep(sleep_time)

            if elapsed_time > 0.6:
                logging.info(f"执行时间:{elapsed_time}")

            del client
        except Exception as e:
            logging.error(e)
            # Back off briefly: without this, a persistent failure (e.g. Redis
            # unreachable) makes the bare `continue` busy-spin at 100% CPU.
            time.sleep(1)
            continue
        

def start_process(stock_slice, index):
    """Worker-process entry point: run the polling loop for one slice of codes.

    Any exception escaping the (normally infinite) polling loop is logged so
    the process dies quietly instead of dumping a traceback.
    """
    try:
        get_real(stock_slice, str(index))
    except Exception as exc:
        logging.error(exc)
        
    
def divide_workload(stock_list, num_processes):
    """Split *stock_list* into contiguous slices and start one worker Process
    per slice (target: start_process).

    :param stock_list: list of stock code strings to distribute.
    :param num_processes: number of worker processes to spawn.
    :return: list of started multiprocessing.Process objects.
    """
    processes = []
    # Base number of codes per process; the last process also takes the
    # remainder.  (Loop-invariant, so computed once.)
    workload = len(stock_list) // num_processes
    for i in range(num_processes):
        start_index = i * workload
        # BUG FIX: the last slice must extend to the end of the list (None).
        # The previous end index of -1 made stock_list[start:-1] silently drop
        # the final stock code from all processing.
        end_index = None if i == num_processes - 1 else (i + 1) * workload
        p = Process(target=start_process, args=(stock_list[start_index:end_index], i + 1))
        processes.append(p)
        p.start()

    return processes

     
        
if __name__ == "__main__":
    # Snapshot of all A-share spot quotes from Eastmoney via akshare.
    stock_zh_a_spot_em_df = ak.stock_zh_a_spot_em()
    # Drop codes starting with "8" or "4" (Beijing Stock Exchange / NEEQ
    # boards) — str.startswith accepts a tuple, replacing two separate passes.
    stock_zh_a_spot_em_df = stock_zh_a_spot_em_df[
        ~stock_zh_a_spot_em_df["代码"].str.startswith(("8", "4"))
    ]
    # Drop rows with no latest price (suspended / not yet traded today).
    stock_zh_a_spot_em_df = stock_zh_a_spot_em_df[stock_zh_a_spot_em_df["最新价"].notnull()]
    # Cache the full snapshot only when it looks complete (sanity threshold).
    if len(stock_zh_a_spot_em_df) > 5080:
        r.set("stock_count", pickle.dumps(stock_zh_a_spot_em_df))

    # Plain 6-digit code list handed to the polling workers.
    stock_list = stock_zh_a_spot_em_df["代码"].str[0:6].tolist()
    logging.info(f"股票代码总数：{len(stock_list)}")

    try:
        num_processes = 10  # tune to CPU / network capacity
        processes = divide_workload(stock_list, num_processes)

        # Block until all workers exit (they normally run forever).
        for p in processes:
            p.join()
    except Exception as e:
        logging.error(e)