import datetime
import json
import re
import time
from collections import Counter

import pandas
import requests
from lxml import etree

# Next row label to use when appending to ``table`` (see save_data()).
index = 0
# Per-day tally of posts: maps "YYYY-MM-DD" date strings -> post count.
post_count = Counter()
# Accumulated output: one row per day; columns are 日期 (date) and 贴数 (post count).
table = pandas.DataFrame(columns=['日期', '贴数'], index=None)


def get_count(page=1):
    """Download one page of the Eastmoney guba board bk0896 ("白酒") and
    tally its posts per day into the global ``post_count`` counter.

    The post list is embedded in the page as a JavaScript variable
    (``var article_list={...};``) which is extracted with a regex and
    parsed as JSON.

    Parameters
    ----------
    page : int
        1-based page number of the board's post list.

    Returns
    -------
    bool
        True on success; False when the response does not contain the
        expected article list or names a different board (both typically
        mean the IP has been rate-limited), telling the caller to stop.
    """
    global post_count
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Connection": "keep-alive",
        "Referer": "http://mguba.eastmoney.com/",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36 Edg/118.0.2088.57"
    }
    cookies = {
        "qgqp_b_id": "89a3dbb8d6e699f88ef83ba99e436725",
        "st_si": "48309338249586",
        "st_asi": "delete",
        "st_pvi": "30067746987554",
        "st_sp": "2023-10-22%2015%3A50%3A03",
        "st_inirUrl": "https%3A%2F%2Fguba.eastmoney.com%2Fo%2Flist%2Cbk0896%2C1179455716.html",
        "st_sn": "58",
        "st_psi": "2023102217452965-117001356556-3273529891"
    }
    url = f"http://guba.eastmoney.com/list,bk0896_{page}.html"
    # timeout prevents the scraper from hanging forever on a stalled connection.
    response = requests.get(url, headers=headers, cookies=cookies,
                            verify=False, timeout=15)
    response.encoding = "utf-8"
    dom = etree.HTML(response.text)
    script_text = "".join(dom.xpath("/html/body/script[1]/text()"))
    match = re.search(r'var article_list=(\{.*?\});', script_text, re.S)
    if match is None:
        # Block/captcha pages carry no embedded article list; report the
        # rate limit instead of crashing on match.group(1).
        print(f"ip被限制,已保存到第{page}页")
        return False
    json_data = json.loads(match.group(1))
    if json_data['bar_name'] != '白酒':
        # Wrong board name in the payload is the other rate-limit symptom.
        print(json_data)
        print(f"ip被限制,已保存到第{page}页")
        return False
    for i in json_data['re']:
        post_time = datetime.datetime.strptime(i['post_last_time'], "%Y-%m-%d %H:%M:%S")
        post_count[post_time.strftime("%Y-%m-%d")] += 1
    return True


def save_data():
    """Flush the per-day tallies in ``post_count`` into the global ``table``
    DataFrame, one row per day (columns: 日期, 贴数).

    The counter is cleared after flushing so the function is idempotent:
    the main loop calls it both when a page fails and again after the loop,
    which previously appended every row twice into the saved CSV.
    """
    global index
    global table, post_count
    for date, count in post_count.items():
        table.loc[index] = [date, count]
        index = index + 1
    # Reset so a subsequent call does not re-append the same rows.
    post_count.clear()


if __name__ == '__main__':
    # Scrape pages 501-541; stop early if the site starts rate-limiting us.
    for i in range(501, 542):
        try:
            print(f"下载第{i}页数据中...")
            result = get_count(page=i)
            if result is False:
                # Rate-limited: stop paging. Saving happens once, below —
                # the old extra save here flushed post_count twice and
                # duplicated every row in the CSV.
                break
            time.sleep(3)  # be polite between requests
        except Exception as e:
            # Best-effort scraping: log the failure and try the next page.
            print(f"继续{e}")
    save_data()
    table.to_csv("数据500-541.csv")
