# 引入所需要的包 (imports: stdlib first, then third-party)
import re
from collections import Counter

import requests
from bs4 import BeautifulSoup as bs

# Fetch one result page and append its raw body to the local cache file.
def get_data(url):
    """Fetch *url* and append the raw response text to the 'lottery' file.

    The caller requests four pages in sequence, so the file is opened in
    append mode. Because of that, clear the file before re-running the
    program or old records will accumulate.

    :param url: full JSONP endpoint URL for one page of winning numbers
    :returns: None (side effect: appends to the 'lottery' file on success)
    """
    # Browser-like headers (incl. cookie/referer) so the endpoint answers;
    # values are copied verbatim from a real browser session.
    headers = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie': 'Hm_lvt_692bd5f9c07d3ebd0063062fb0d7622f=1684658903; Hm_lvt_12e4883fd1649d006e3ae22a39f97330=1684658903; _ga=GA1.2.1949381055.1684658903; _gid=GA1.2.1416767338.1684658903; PHPSESSID=srnhfl7a6dmqot87kpaflvmb27; Hm_lpvt_692bd5f9c07d3ebd0063062fb0d7622f=1684659990; _gat_UA-66069030-3=1; KLBRSID=13ce4968858adba085afff577d78760d|1684659988|1684659988; Hm_lpvt_12e4883fd1649d006e3ae22a39f97330=1684659991',
        'referer': 'https://www.zhcw.com/',
        'sec-fetch-mode': 'no-cors',
        "sec-fetch-site": 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36'
    }
    # A timeout keeps a dead server from hanging the whole run forever.
    r = requests.get(url, headers=headers, timeout=10)
    print(r.status_code)
    if r.status_code == 200:
        # 'with' guarantees the file is closed (and flushed) even if the
        # write raises. A single write of the whole body replaces the
        # original per-character writelines() loop with identical output.
        with open('lottery', 'a') as fo:
            fo.write(r.text)


if __name__ == "__main__":
    # Four endpoint URLs scraped from the site; each returns one JSONP page
    # of 30 winning-number records (pageNum=1..4).
    urls = ["https://jc.zhcw.com/port/client_json.php?callback=jQuery112207867352397496448_1684809435552&transactionType=10001001&lotteryId=1&issueCount=100&startIssue=&endIssue=&startDate=&endDate=&type=0&pageNum=1&pageSize=30&tt=0.7021619423091936&_=1684809435554",
            "https://jc.zhcw.com/port/client_json.php?callback=jQuery112207867352397496448_1684809435552&transactionType=10001001&lotteryId=1&issueCount=100&startIssue=&endIssue=&startDate=&endDate=&type=0&pageNum=2&pageSize=30&tt=0.3475163357475257&_=1684809435555",
            "https://jc.zhcw.com/port/client_json.php?callback=jQuery112207867352397496448_1684809435552&transactionType=10001001&lotteryId=1&issueCount=100&startIssue=&endIssue=&startDate=&endDate=&type=0&pageNum=3&pageSize=30&tt=0.5846911647323787&_=1684809435556",
            "https://jc.zhcw.com/port/client_json.php?callback=jQuery112207867352397496448_1684809435552&transactionType=10001001&lotteryId=1&issueCount=100&startIssue=&endIssue=&startDate=&endDate=&type=0&pageNum=4&pageSize=30&tt=0.5882644880951045&_=1684809435557"]
    # Fetch every page; get_data appends each response to the 'lottery' file.
    for page_url in urls:
        get_data(page_url)

    # Read the cached pages back and extract the winning numbers.
    # 'with' closes the file automatically (the original leaked the handle).
    with open('lottery', "r") as fo:
        data_txt = fo.read()
    print(data_txt)
    # Each front-area group is exactly 17 chars: six two-digit numbers
    # separated by single spaces, e.g. "01 05 12 18 23 30".
    pattern = r'\"frontWinningNum\":\"([\d\s]{17})\"'
    data_match = re.findall(pattern, data_txt)
    print(f"find {len(data_match)} matched numbers")

    with open('winner.txt', 'w') as f:
        # One 17-char group per line; the matches are already strings, so
        # the original map(str, ...) was redundant. The with block closes
        # the file when it ends.
        f.write('\n'.join(data_match))
    with open("winner.txt", "r") as f:
        numbers = f.readlines()

    # Count how often each individual number appears across all draws.
    counts = Counter()
    for line in numbers:
        counts.update(line.strip().split())

    # Keep the 7 most frequent numbers as the "prediction".
    # NOTE(review): the original comment said "top ten" but the code took
    # [:7]; behavior is preserved, only the name/comment are corrected.
    # Counter.most_common is stable for ties, matching the original
    # sorted(..., reverse=True)[:7] ordering.
    top_seven = [num for num, _ in counts.most_common(7)]
    print("预测结果", top_seven)