import requests
import time
import json
import os
from datetime import datetime
from jsssc_test_dan import analyze_lottery_data
# Data crawler (Speed Airship / jisu feiting lottery)
import shutil

def clear_directory(dir_path):
    """Empty *dir_path* of all contents while keeping the directory itself.

    If the directory does not exist yet it is created instead.

    Args:
        dir_path (str): path of the directory to empty.

    Raises:
        ValueError: if *dir_path* exists but is not a directory.
    """
    # Missing directory: create it and there is nothing left to clear.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
        return

    # Refuse to operate on anything that is not a directory.
    if not os.path.isdir(dir_path):
        raise ValueError(f"路径 '{dir_path}' 不是目录")

    # Remove every entry; failures are reported but do not abort the loop.
    for entry in os.listdir(dir_path):
        entry_path = os.path.join(dir_path, entry)
        try:
            # Real (non-symlink) directories are removed recursively;
            # files and symlinks (including symlinks to directories) are
            # unlinked so we never follow a link while deleting.
            if os.path.isdir(entry_path) and not os.path.islink(entry_path):
                shutil.rmtree(entry_path)
            elif os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
        except Exception as e:
            print(f"删除 {entry_path} 失败: {str(e)}")
            # Error-handling hook: extend here if callers need more than a log line.


# Section: reading and merging the per-page JSON files

def merge_json_files(input_dir, output_file):
    """Merge the "items" arrays of every ``*.json`` file in *input_dir*.

    Files that fail to parse (or raise any other error) are reported and
    skipped; a file without an "items" key contributes nothing. The merged
    result is written to *output_file* as ``{"items": [...]}``.

    Args:
        input_dir (str): directory scanned (non-recursively) for .json files.
        output_file (str): path the merged JSON document is written to.

    Returns:
        str: *output_file*, for caller convenience.
    """
    all_items = []

    # Sort the listing so the merge order is deterministic
    # (os.listdir order is filesystem-dependent).
    for filename in sorted(os.listdir(input_dir)):
        if not filename.endswith('.json'):
            continue
        file_path = os.path.join(input_dir, filename)

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Missing "items" keys are treated as empty pages.
            all_items.extend(data.get("items", []))

            # BUG FIX: these log lines previously printed the literal
            # placeholder "(unknown)" instead of the file name.
            print(f"Processed {filename}")
        except json.JSONDecodeError as e:
            print(f"Error decoding JSON in {filename}: {e}")
        except Exception as e:
            print(f"Error processing {filename}: {e}")

    # Write all collected items as a single JSON document.
    output_data = {
        "items": all_items
    }
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(output_data, f, ensure_ascii=False, indent=4)

    return output_file



# Build the HTTP request headers
def get_headers(cookie_value, ip=None, host=None):
    """Build the request headers for the lottery-history API.

    Args:
        cookie_value (str): value sent as the ``Cookie`` header.
        ip (str, optional): value for the ``Host`` header. Defaults to the
            module-level global ``ips`` (set by ``spiderData``) so existing
            ``get_headers(cookie)`` callers keep working.
        host (str, optional): base URL used to build the ``Referer`` header.
            Defaults to the module-level global ``hosts``.

    Returns:
        dict: header mapping suitable for ``requests.get(..., headers=...)``.
    """
    # Backward-compatible fallback to the globals the original relied on.
    if ip is None:
        ip = ips
    if host is None:
        host = hosts
    return {
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Cookie": cookie_value,
        "Host": ip,
        "Referer": f"{host}/member/history",
        "sec-ch-ua": '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
    }


# Compute the total number of pages
def calculate_total_pages(total_records, count_per_page):
    """Return how many pages of size *count_per_page* hold *total_records* records."""
    # Ceiling division via negated floor division: ceil(a / b) == -(-a // b).
    return -(-total_records // count_per_page)


# Send the GET requests page by page and save the results
def fetch_and_save_data(cookie_value, total_records, count_per_page, lottery_id, save_dir):
    """Page through the lottery-history API and save each page as JSON.

    Each successfully fetched page is written to *save_dir* as its own
    ``.json`` file; failures are logged and the loop continues.

    Args:
        cookie_value (str): session cookie forwarded via the request headers.
        total_records (int): expected number of records on the server.
        count_per_page (int): page size requested from the API.
        lottery_id: identifier of the lottery whose history is fetched.
        save_dir (str): directory the per-page JSON files are written to.
    """
    total_pages = calculate_total_pages(total_records, count_per_page)
    print(f"Total pages to fetch: {total_pages}")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # NOTE(review): this fetches pages 0..total_pages inclusive, i.e. one
    # more request than `total_pages` — confirm whether the API pages are
    # 0- or 1-indexed before tightening the range.
    for page in range(0, total_pages + 1):
        # NOTE(review): the URL host is hard-coded and differs from the
        # host passed to spiderData (used for the Host/Referer headers) —
        # confirm which endpoint is intended.
        url = f"https://c345902.sys998.com/api/mem/lottery/history/{lottery_id}?count={count_per_page}&page={page}&lottery_id={lottery_id}"
        headers = get_headers(cookie_value)

        try:
            # Timeout added so a stalled connection cannot hang the crawl forever.
            response = requests.get(url, headers=headers, timeout=30)

            if response.status_code == 200:
                data = response.json()
                print(f"Fetched page {page}/{total_pages}")

                # Include the page number in the file name so two pages
                # fetched within the same second cannot overwrite each other.
                timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                filename = f"{save_dir}/{timestamp}_page_{page}_lottery_id_{lottery_id}.json"

                with open(filename, 'w', encoding='utf-8') as f:
                    json.dump(data, f, ensure_ascii=False, indent=4)

                # BUG FIX: previously printed the literal "(unknown)".
                print(f"Saved data to {filename}")
            else:
                print(f"Request failed with status code: {response.status_code}")
                print(f"Response: {response.text}")

            # Be polite to the server: pause between requests (skip after the last page).
            if page < total_pages:
                time.sleep(2)

        except requests.exceptions.RequestException as e:
            print(f"Request error on page {page}: {e}")


def spiderData(cookie_value, lottery_id, ip, host, choose_number=None):
    """Crawl the lottery history, merge the page files, and run the analysis.

    Args:
        cookie_value (str): session cookie for the API requests.
        lottery_id: lottery identifier forwarded to the fetch step.
        ip (str): value for the Host request header (stored in global `ips`).
        host (str): base URL for the Referer header (stored in global `hosts`).
        choose_number: forwarded to analyze_lottery_data; defaults to None so
            legacy 4-argument callers no longer raise TypeError.

    Returns:
        bool: True on success, False if any step raised.
    """
    try:
        # get_headers() falls back to these globals when not given explicit values.
        global ips, hosts
        ips = ip
        hosts = host
        total_records = 1100   # NOTE(review): fetch size is fixed here — confirm it matches the server.
        count_per_page = 50
        # BUG FIX: lottery_id was previously overwritten with a hard-coded 11,
        # silently ignoring the caller's argument; honor the parameter instead.
        current_date = datetime.now().strftime('%Y%m%d')
        save_dir = f"spider_ssc_data_{current_date}"
        clear_directory(save_dir)
        fetch_and_save_data(cookie_value, total_records, count_per_page, lottery_id, save_dir)
        output_filename = f"merged_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        output_file = os.path.join(save_dir, output_filename)
        merge_json_files(save_dir, output_file)
        analyze_lottery_data(save_dir, output_filename, choose_number)
        return True
    except Exception as e:
        # Best-effort boundary: report the failure and signal it to the caller
        # instead of crashing. (Replaces the cryptic "hu - ..." message.)
        print(f"spiderData failed: {str(e)}")
        return False
# Example invocation
if __name__ == "__main__":
    # Replace these with real values before running.
    cookie_value = ("visid_incap_2980419=egSo2iMFQliCP0i77FKHaPjjRmgAAAAAQUIPAAAAAACa4YBjddlB0TgVo4QAkZgU; incap_ses_809_2980419=01R+VsQZd0l+9BfwfiU6C4lfWWgAAAAAMW3JD3vxINKjdx4CaYHaUw==; koa:sess=07f92e5653414e16d2c50e1b87ae67e929015d29f73da31e; nlbi_2980419=P3RTOPPrI3fgLCGdhHMyXgAAAACZOMw9yQCsuYRrg+VHsoFw")
    total_records = 1100  # total record count (NOTE: unused — spiderData hard-codes its own)
    count_per_page = 50   # records per page (NOTE: unused — spiderData hard-codes its own)
    lottery_id = 11       # fixed lottery_id
    # BUG FIX: spiderData takes a fifth argument (choose_number); the previous
    # call omitted it and raised TypeError before any work happened.
    choose_number = 1  # TODO(review): confirm a sensible choose_number for analyze_lottery_data
    # NOTE(review): host 'c145902.sys998.com' differs from the hard-coded fetch
    # URL host 'c345902.sys998.com' in fetch_and_save_data — confirm which is correct.
    spiderData(cookie_value, lottery_id, 'c145902.sys998.com', 'https://c145902.sys998.com', choose_number)