# coding=utf-8
import collections
import os
import shutil
import threading
from queue import Queue

import pandas as pd
import akshare as ak
from loguru import logger

from cal_ops.point import cal_point1_txt
from models.stock_model import StockNumber
from mylib import download_all
from mylib.mycsv import sort_csv

# Work queue linking the download thread (producer) and the calculation
# thread (consumer); the (1, 1, 1) tuple is used as a shutdown sentinel.
st_queue = Queue()


def get_all_txt_files():
    """Collect the files directly under the 'txts' directory.

    Returns:
        list[tuple[str, str]]: (relative path, filename without '.txt')
        for each file in the top level of 'txts'. Returns [] when the
        directory is missing or empty — the original fell off the os.walk
        loop and returned None in that case, which broke iterating callers.
    """
    txt_dir = 'txts'
    if not os.path.isdir(txt_dir):
        return []
    return [(os.path.join(txt_dir, name), name.replace('.txt', ''))
            for name in os.listdir(txt_dir)
            if os.path.isfile(os.path.join(txt_dir, name))]


def clear_dir(d='result'):
    """Recreate directory *d* empty, discarding any previous contents."""
    try:
        shutil.rmtree(d)
    except FileNotFoundError:
        pass  # nothing to remove — same outcome as the exists() guard
    os.makedirs(d)


def get_all_stock_csv_path():
    """Collect paths of the files directly under the 'stocks' directory.

    Returns:
        list[str]: relative paths of the top-level files in 'stocks'.
        Returns [] when the directory is missing or empty — the original
        returned None in that case (os.walk yielded nothing), which broke
        iterating callers.
    """
    stock_dir = 'stocks'
    if not os.path.isdir(stock_dir):
        return []
    return [os.path.join(stock_dir, name)
            for name in os.listdir(stock_dir)
            if os.path.isfile(os.path.join(stock_dir, name))]


def get_all_stocks_indus():
    """Parse 'industry_count_rank.csv', skipping the header row.

    Returns:
        list[list[str]]: one list of comma-separated fields per data row.
    """
    # Explicit UTF-8: the file holds Chinese industry names and the
    # platform default encoding (e.g. GBK on Windows) would mangle them.
    with open('industry_count_rank.csv', 'r', encoding='utf-8') as fr:
        return [line.strip().split(',') for line in fr.readlines()[1:]]


def get_avg_top10(stocks, N=50, top_k=10):
    """Rank stocks by mean absolute 'pct_chg' over their most recent N rows.

    Args:
        stocks: iterable of stock codes; each is read from 'stocks/{code}.csv'.
        N: number of leading rows (most recent days) to average over.
        top_k: how many of the highest-amplitude entries to return
            (default 10, matching the original hard-coded behavior).

    Returns:
        list[tuple[str, float]]: (code, rounded mean |pct_chg|) pairs,
        highest amplitude first. Codes whose CSV is missing are skipped.
    """
    averages = {}
    for code in stocks:
        csv_path = f'stocks/{code}.csv'
        if not os.path.exists(csv_path):
            continue  # no data downloaded for this code — skip silently
        frame = pd.read_csv(csv_path)
        averages[code] = round(abs(frame[0:N]['pct_chg'].mean()), 4)
    ranked = sorted(averages.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[:top_k]


def get_end_date(log_dir, indus, indus_num):
    """Read the latest computed date from '{log_dir}/{indus_num}_all_{indus}.csv'.

    Args:
        log_dir: directory holding the per-industry result CSVs.
        indus: industry name embedded in the filename.
        indus_num: industry index embedded in the filename.

    Returns:
        int | None: first field of the first line as an int (dates are
        stored as yyyymmdd integers), or None when the file does not exist.
    """
    f_path = f'{log_dir}/{indus_num}_all_{indus}.csv'
    if not os.path.exists(f_path):
        return None
    with open(f_path, 'r') as fr:
        first_line = fr.readline()
        # int() replaces the original eval(): identical result for yyyymmdd
        # integers, without executing arbitrary file content.
        return int(first_line.split(',')[0])


def t_download(today_date):
    """Producer thread: download data for the selected stocks, then signal t_cal.

    On success pushes ('cal_one', count, codes) onto st_queue; always pushes
    the (1, 1, 1) sentinel last so the consumer thread can terminate.
    """
    try:
        df = pd.read_csv('cal_ops/all.csv')
        # Optional whitelist of stock names; leave empty to take every stock.
        stocks_names = [
            # '石英股份',
            # '松炀资源',
            # '常青科技'
        ]
        stocks = []
        for row in df.index:
            sn = StockNumber(df.loc[row])
            if 'ST' in sn.name:
                continue  # skip special-treatment (ST) stocks
            if not stocks_names or sn.name in stocks_names:
                stocks.append(sn.ts_code)
        stocks = download_all.run(stocks, today_date)
        st_queue.put(('cal_one', len(stocks), stocks))
    except Exception as e:
        logger.error(e)
    st_queue.put((1, 1, 1))


def t_cal():
    """Consumer thread: run point calculations for work items from st_queue.

    Each item is (indus, indus_num, stocks); the (1, 1, 1) sentinel pushed
    by t_download ends the loop.
    """
    log_dir = os.path.basename(__file__).split('.')[0]
    # Per-stock charts are only wanted when computing a single day.
    gen_excel_flag = True
    bkd = 200
    start_date = 20991230
    # start_date = 20240226
    while True:
        # BUG FIX: the original `if not st_queue: continue` busy-waited on a
        # condition that is always False (queue.Queue defines no __bool__ or
        # __len__), and was redundant anyway — Queue.get() blocks until an
        # item is available.
        indus, indus_num, stocks = st_queue.get()
        if (indus, indus_num, stocks) == (1, 1, 1):
            logger.warning('所有计算完成')
            break
        logger.warning(f't_cal 正在计算【{indus}, {indus_num}】')
        end_date = get_end_date(log_dir, indus, indus_num)
        cal_point1_txt.run(False, gen_excel_flag, log_dir, start_date, end_date, stocks, indus_num, indus=indus,
                           bkd=bkd, N=6)
        logger.warning(f't_cal 计算完成【{indus}, {indus_num}】')


def get_today(d=1):
    """Return the latest trading date as a yyyymmdd int from the Shanghai index.

    Args:
        d: truthy -> fetch fresh index data via akshare and cache it to
           'shanghai_index.csv'; falsy -> reuse the cached CSV.

    Returns:
        int: most recent trading date, e.g. 20240226.
    """
    if d:
        sz_index_df = ak.index_zh_a_hist(symbol="000001", period="daily")
        df_sorted = sz_index_df.sort_values(by='日期', ascending=False)
        df_sorted.to_csv("shanghai_index.csv", index=False)
    else:
        df_sorted = pd.read_csv('shanghai_index.csv')
    # int() replaces the original eval(): '2024-02-26' -> 20240226 without
    # executing arbitrary text; .iloc[0] avoids copying the whole column.
    return int(str(df_sorted['日期'].iloc[0]).replace('-', ''))


def get_all_xlsx():
    """Recursively collect every .xlsx path under 'main_cal_one'.

    Returns:
        list[str]: all *.xlsx files anywhere in the tree; [] when the
        directory is missing.
    """
    res = []
    for root, dirs, files in os.walk('main_cal_one'):
        # BUG FIX: the original reassigned `res` on every walk iteration,
        # so only the files of the LAST visited directory survived.
        res.extend(os.path.join(root, item) for item in files if item.endswith('xlsx'))
    return res


if __name__ == '__main__':
    logger.add("log/main_all_txt_{time}.log", level='WARNING')
    today_date = get_today()
    # NOTE(review): t1.join() runs before t2.start(), so downloading and
    # calculation execute sequentially despite using threads — presumably
    # intentional (the queue is fully populated before consumption starts),
    # but worth confirming.
    t1 = threading.Thread(target=t_download, args=(today_date,))
    t2 = threading.Thread(target=t_cal, args=())
    t1.start()
    t1.join()
    t2.start()
    t2.join()
    # Find the stocks with the largest current-period drop.
    xlsx_list = get_all_xlsx()
    full_path_max_down = f'{today_date}_max_down.csv'
    f_tk = open(full_path_max_down, 'w', encoding='utf-8')
    # CSV header: industry, name, period drop, from-price, to-price, link
    # (rows below start with '\n', so the header carries no trailing newline).
    f_tk.write(f'行业,名称,阶段跌幅,f_price,t_price,link')
    # max_day_dict = collections.defaultdict(int)
    for x in xlsx_list:
        df = pd.read_excel(x, sheet_name='Sheet', engine='openpyxl')
        # all_date = list(df['date'])
        all_price = list(df['price'])
        # Skip stocks whose latest price (row 0) is below 5.
        if all_price[0] < 5:
            continue
        all_pct = list(df['all_pct'])
        # Locate the most negative cumulative percentage change and its row.
        min_pct = 0
        min_pct_index = 0
        for idx, pct in enumerate(all_pct):
            if pct < min_pct:
                min_pct = pct
                min_pct_index = idx
        # Filename presumably follows '<...>_<code>.<exchange>.xlsx', so the
        # last '_' part splits into (code, exchange) — TODO confirm pattern.
        p_arr = os.path.basename(x).replace('.xlsx', '').split('_')
        code_arr = p_arr[-1].split('.')
        hyperlink = f'"https://xueqiu.com/S/{code_arr[1]}{code_arr[0]}"'
        date_arr_hyp = f'=HYPERLINK({hyperlink})'
        # max_day_dict[all_date[min_pct_index]] += 1
        # Only emit rows whose minimum all_pct sits at the most recent row
        # (index 0), i.e. the drop bottomed out today.
        if min_pct_index == 0:
            p_arr[-1] = all_pct[0]
            p_arr.append(all_price[1])
            p_arr.append(all_price[0])
            p_arr.append(date_arr_hyp)
            wstr = f"\n"
            for idx, item in enumerate(p_arr):
                wstr += f',{item}' if idx else item
            f_tk.write(wstr)
    f_tk.close()
    sort_csv(full_path_max_down, ['阶段跌幅'], [True])

    # max_count = f'{today_date}_max_count.csv'
    # f_max_count = open(max_count, 'w', encoding='utf-8')
    # f_max_count.write(f'日期,count')
    # max_day_dict_s = sorted(max_day_dict.items(), key=lambda xa: xa[1])
    # for k, v in max_day_dict_s:
    #     f_max_count.write(f'\n{k},{v}')
    # f_max_count.close()
