import datetime
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
import yaml
import os

# Accumulator for scraped draw rows; row 0 is the CSV header row.
# NOTE(review): module-level mutable state — scrape_lottery_data appends to it
# via `global data`, so running the scraper twice in one process would
# accumulate rows from both runs.
data = [['num1', 'num2', 'num3', 'num4', 'num5','draw_date', 'sum', 'odd_or_even', 'sum_big_small']]


def load_config(config_path):
    """Load a YAML configuration file and return its parsed content.

    ``config_path`` is resolved relative to one level above the grandparent
    directory of this script file (the extra '../' hop is preserved from the
    project's existing layout).

    :param config_path: config file path relative to that base directory
    :return: the deserialized YAML document (typically a dict)
    """
    script_location = os.path.abspath(__file__)
    root_path = os.path.dirname(os.path.dirname(script_location))
    resolved_path = os.path.join(root_path, '../', config_path)
    with open(file=resolved_path, mode='r', encoding="utf-8") as handle:
        return yaml.safe_load(handle)


def init_config():
    """Build the scraper's runtime settings from ``config/ssc_config.yaml``.

    The "current date" combines the configured ``end_year`` with today's
    month and day (the year deliberately comes from the config, not the
    system clock).

    :return: tuple ``(url, output_csv_path, current_date, start_year)``
        where ``current_date`` is a ``YYYY-MM-DD`` string
    """
    config_path = 'config/ssc_config.yaml'
    config = load_config(config_path)
    current_year = config['scraper']['end_year']
    # Single clock read (the original called datetime.now() twice, so month
    # and day could disagree across a midnight boundary); :02d replaces the
    # manual "'0' + str(...)" zero-padding.
    now = datetime.datetime.now()
    current_date = f"{current_year}-{now.month:02d}-{now.day:02d}"
    start_year = config['scraper']['start_year']

    url = config['scraper']['url']
    output_csv_path = config['scraper']['output_file']

    return url, output_csv_path, current_date, start_year


def scrape_lottery_data(url, output_csv_path, current_date, start_year):
    """Scrape historical lottery draw data and save it as a CSV file.

    Walks backwards in time: each request fetches the draws for one date,
    then moves to the day before the oldest draw returned, until reaching
    November 1st of ``start_year``. Rows accumulate in the module-level
    ``data`` list (row 0 is the header).

    :param url: base fetch URL; a ``YYYY-MM-DD`` date string is appended
    :param output_csv_path: CSV output path relative to the project root,
        using ``/`` as the separator
    :param current_date: ``YYYY-MM-DD`` date string to start scraping from
    :param start_year: year whose November 1st is the scrape cut-off
    :raises ValueError: when the API responds with ``errorCode == -1``
    """
    global data  # shared accumulator; see its declaration at module level

    script_path = os.path.abspath(__file__)
    script_parent_fold = os.path.dirname(script_path)
    project_path = os.path.dirname(os.path.dirname(script_parent_fold))

    # Scrape cut-off: November 1st of the configured start year (hoisted out
    # of the loop — it is loop-invariant).
    stop_date = datetime.datetime.strptime(f"{start_year}-11-01", '%Y-%m-%d')

    temp_current_date = current_date
    while True:
        full_fetch_url = f"{url}{temp_current_date}"
        print(full_fetch_url)
        response = requests.get(
            url=full_fetch_url,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
            },
            timeout=30,  # the original had no timeout and could hang forever
        )
        json_data = response.json()
        print(json_data)
        if json_data.get("errorCode") == -1:
            raise ValueError(json_data.get("message"))

        res_data = json_data['result']['data']
        for item in res_data:
            # One CSV row: the 5 drawn numbers, the draw time, then the
            # three summary columns (matches the header in data[0]).
            inner_data = item['preDrawCode'].split(',')
            inner_data.append(item['preDrawTime'])
            inner_data.append(item['sumNum'])
            inner_data.append(item['sumSingleDouble'])
            inner_data.append(item['sumBigSmall'])
            data.append(inner_data)
            print(inner_data)

        # Continue from the day before the oldest draw in this batch.
        last_date = res_data[-1]['preDrawTime'].split(' ')[0]
        last_date_before_day = datetime.datetime.strptime(last_date, '%Y-%m-%d') - datetime.timedelta(days=1)
        # BUG FIX: the original assigned the datetime object itself, so every
        # URL after the first contained "YYYY-MM-DD HH:MM:SS"; format it back
        # to the bare date string the API expects.
        temp_current_date = last_date_before_day.strftime('%Y-%m-%d')

        if last_date_before_day < stop_date:
            print(f"当前时时彩截止到今天，历史期数为{len(data) - 1}期")
            # BUG FIX: the original did output_csv_path.replace('/', '\\'),
            # which embeds literal backslashes in the filename on non-Windows
            # systems; join the components portably instead (identical result
            # on Windows).
            csv_path = os.path.join(project_path, *output_csv_path.split('/'))
            csv_dir_path = os.path.dirname(csv_path)
            try:
                os.makedirs(csv_dir_path, exist_ok=True)
                print(f"文件夹 '{csv_dir_path}' 创建成功或已存在。")
            except Exception as e:
                print(f"创建文件夹 '{csv_dir_path}' 时出错: {e}")
            df = pd.DataFrame(data[1:], columns=data[0])  # row 0 is the header
            # to_csv creates/overwrites the file itself, so the original
            # "touch an empty file first" step was redundant and removed.
            df.to_csv(csv_path, index=False)
            print(f"数据保存路径为： {csv_path}")
            break


def start_scrapy():
    """Entry point: read all settings from the YAML config and run the scrape."""
    settings = init_config()
    scrape_lottery_data(*settings)


def scrape_ssc_data(url, output_csv_path):
    """Run the scrape with a caller-supplied URL and output path.

    The date window (current date and start year) still comes from the YAML
    config via init_config(); the configured URL and output path are ignored
    in favor of the arguments.
    """
    config_values = init_config()
    current_date, start_year = config_values[2], config_values[3]
    scrape_lottery_data(url, output_csv_path, current_date, start_year)

# if __name__ == '__main__':
#     start_scrapy()
