import argparse
import concurrent.futures
import logging
import os
import time
import datetime
import akshare
import akshare as ak
import pandas as pd
import threading
import configparser
import unittest
from podifan_20days import find_all_buys as find_20_day_line


# Mapping of Chinese index display names to their sina/akshare symbols
# (exchange prefix 'sh'/'sz' + numeric code).
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by another module or kept for manual lookup; confirm before removing.
index = {
    '上证综指': 'sh000001', '深证成指': 'sz399001',
    '沪深300': 'sh000300', '创业板指': 'sz399006',
    '上证50': 'sh000016', '中证500': 'sh000905',
    '中小板指': 'sz399005'
}

# Runtime configuration: DATA_DIR is the root folder for all downloaded CSVs,
# LOG_FILE receives the download log. configparser.read() silently ignores a
# missing config.ini, so absent keys surface here as a KeyError at import time.
config = configparser.ConfigParser()
config.read('config.ini')
DATA_DIR = config['DEFAULT']['DataDir']
LOG_FILE = config['DEFAULT']['LogFile']

# File-based logging for the whole script; downloads log at INFO, failures at ERROR.
logging.basicConfig(filename=LOG_FILE, level=logging.INFO,
                    format='%(asctime)s:%(levelname)s:%(message)s')


def download_index_data(code):
    """Download the full daily series for one index symbol and save it as CSV.

    Any failure is logged and swallowed so a single bad symbol does not
    abort a batch download.
    """
    target = f'{DATA_DIR}/index/{code}.csv'
    try:
        frame = ak.stock_zh_index_daily(symbol=code)
        frame.to_csv(target, index=False)
        logging.info(f'Downloaded {code}')
        print(f'Downloaded {code}')
    except Exception as err:
        logging.error(f'Error downloading {code}: {err}')


def get_data_index():
    """Download daily data for every index code listed in list/index.csv.

    Fans the downloads out over a small thread pool; individual failures are
    handled inside download_index_data, so result() never raises here.
    """
    codes = pd.read_csv('list/index.csv')['指数代码']
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as pool:
        pending = [pool.submit(download_index_data, c) for c in codes]
        for done in concurrent.futures.as_completed(pending):
            done.result()


def download_stock_data(code):
    """Download forward-adjusted (qfq) daily bars for one A-share and save as CSV.

    Errors are logged and swallowed so the surrounding batch keeps running.
    """
    target = f'{DATA_DIR}/stocks/{code}.csv'
    try:
        bars = ak.stock_zh_a_daily(symbol=code, adjust="qfq")
        bars.to_csv(target, index=False)
        logging.info(f'Downloaded {code}')
    except Exception as err:
        logging.error(f'Error downloading {code}: {err}')


def get_data_stock():
    """Download daily bars for every stock code listed in list/stocks.csv.

    Uses a larger pool (10 workers) than the index/ETF downloads; failures
    are absorbed inside download_stock_data, so result() never raises here.
    """
    codes = pd.read_csv('list/stocks.csv')['代码']
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
        pending = [pool.submit(download_stock_data, c) for c in codes]
        for done in concurrent.futures.as_completed(pending):
            done.result()


def download_etf_data(code):
    """Download daily history for one ETF symbol and save it under DATA_DIR/etf.

    Uses the same akshare endpoint as the index download. Both success and
    failure are echoed to stdout as well as the log.
    """
    target = f'{DATA_DIR}/etf/{code}.csv'
    try:
        frame = ak.stock_zh_index_daily(symbol=code)
        frame.to_csv(target, index=False)
        logging.info(f'Downloaded {code}')
        print(f'Downloaded {code}')
    except Exception as err:
        logging.error(f'Error downloading {code}: {err}')
        print(f'Error downloading {code}: {err}')


def get_data_etf():
    """Download daily data for every ETF code listed in list/ETF.csv.

    Fans out over a 5-worker thread pool; download_etf_data absorbs its own
    errors, so result() never raises here.
    """
    codes = pd.read_csv('list/ETF.csv')['代码']
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as pool:
        pending = [pool.submit(download_etf_data, c) for c in codes]
        for done in concurrent.futures.as_completed(pending):
            done.result()


def update_data_daily():
    """Daily refresh entry point: currently downloads only A-share stock data.

    The stock-list rebuild and the index/ETF refreshes are intentionally
    disabled below; re-enable by uncommenting the respective lines.
    """
    # ak.stock_zh_a_spot().to_csv('list.csv', index=False)
    get_data_stock()
    # get_data_index()
    # get_data_etf()


def merge_and_remove_duplicates(list1, list2):
    """Return a sorted list of the distinct values appearing in either input."""
    return sorted(set(list1) | set(list2))


def get_date_list(path=r'd:\Python\study_data\stocks\\', out_file='date.csv'):
    """Build the union of all trading dates found in the per-stock CSVs.

    Scans every ``*.csv`` file in *path*, collects the values of its
    ``date`` column, and writes the sorted, de-duplicated union to
    *out_file*.

    Args:
        path: Directory containing one CSV per stock. Defaults to the
            original hard-coded location for backward compatibility;
            callers can now point it at any data directory (e.g. DATA_DIR).
        out_file: Destination CSV for the combined date list.
    """
    # Accumulate into a set and sort once at the end; the previous approach
    # re-sorted the entire accumulated list for every file (O(n^2 log n)).
    dates = set()
    for file in os.listdir(path):
        if file.endswith('.csv'):
            df = pd.read_csv(os.path.join(path, file))
            dates.update(df['date'].tolist())

    # index=False keeps the output consistent with every other to_csv call
    # in this file (the old call wrote a spurious unnamed index column).
    pd.DataFrame(sorted(dates), columns=['date']).to_csv(out_file, index=False)


if __name__ == '__main__':
    # Daily pipeline: refresh the per-stock CSVs, rebuild the combined
    # trading-date list, then run the 20-day-line buy scan over 'stocks'.
    # NOTE(review): get_date_list() reads a hard-coded d:\ path while the
    # downloads write under DATA_DIR — verify the two locations agree.
    update_data_daily()
    get_date_list()
    find_20_day_line('stocks')
