import csv
import os
import random
import time
import requests as re
from lxml import html
# Output file: CSV of (entry name, page-view count), appended to across runs.
output_file = 'sogou_baike.csv'

# Maintained mapping: fromTitle key -> Sogou Baike numeric entry id.
# NOTE: ids must be bare digits — the URL template below already prepends
# the 'v' (https://baike.sogou.com/v<id>.htm).  The A19/A20 entries used to
# carry a stray leading 'v', which produced broken ".../vv<id>.htm" URLs.
tmp_dict = {'A1': '215467978', 'A2': '215905238', 'A3': '217154662', 'A4': '206175146', 'A5': '216205464',
            'A6': '216292021', 'A7': '177632075', 'A8': '10001608', 'A9': '215948172', 'A10': '208726770',
            'A11': '217068786', 'A12': '216331195', 'A13': '217073415', 'A14': '215933545', 'A15': '216025065',
            'A16': '216092824', 'A17': '217211666', 'A18': '217113435', 'A19': '216950927', 'A20': '213944060',
            'A21': '201905886', 'A22': '216829987', 'A24': '185134655', 'A26': '208472232', 'A27': '167441880',
            'A30': '216929533', 'A31': '217044396', 'A33': '225237953', 'A34': '225078497', 'A35': '223456153',
            'A36': '185925164', 'A37': '216175360', 'A38': '224267865', 'A39': '217814194', 'A40': '223257573',
            'A41': '215832849', 'A42': '225176014', 'A43': '216133411', 'A44': '215830798', 'A45': '223059550',
            'A46': '216947435', 'A47': '225129218', 'A49': '215832095', 'A51': '217040588', 'A52': '225168188',
            'A53': '199714794', 'A54': '208483303', 'A55': '223040106', 'A56': '223330498', 'A57': '217817656',
            'A58': '225190972', 'A59': '198696289', 'A60': '214327057', 'A61': '225168710', 'A62': '223456263',
            'A63': '215830378', 'A64': '216161063', 'A66': '223358622', 'A67': '216979344', 'A68': '216874471'}


# Pool of desktop/mobile User-Agent strings; one is chosen at random per
# request (see the headers dict below) to make traffic look less uniform.
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 13_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Mobile/15E148 Safari/604.1',
]


# Write the CSV header only when the output file is new or empty, so that
# appending across multiple runs does not duplicate the header row.
need_header = not os.path.exists(output_file) or os.path.getsize(output_file) == 0

# buffering=1 -> line-buffered: each row reaches disk as soon as it is
# written, so a crash mid-run loses at most the current entry.
with open(output_file, mode='a', newline='', encoding='utf-8', buffering=1) as file:
    csv_writer = csv.writer(file)
    if need_header:
        csv_writer.writerow(['词条名称', '浏览量'])
    for key, value in tmp_dict.items():
        # Some maintained ids mistakenly carry a leading 'v'; the URL template
        # below already supplies it, so strip it to avoid ".../vv<id>.htm".
        entry_id = value[1:] if value.startswith('v') else value
        base_url = f'https://baike.sogou.com/v{entry_id}.htm?ch=frombaikevr&fromTitle={key}'
        print(base_url)
        headers = {
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': random.choice(user_agents),  # rotate UA per request
            'Referer': 'https://www.google.com/',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Cookie':'SUID=CE2D88752254A20B000000006798E491; cuid=AAGVxtpjUQAAAAuiUkCPAgEASQU=; SUV=1738073232709500; SNUID=DA389D61141238D0A0EFC62515DD6052; LSTMV=308%2C196; LCLKINT=679; ssuid=8278481632; IPLOC=CN1200; __tst_status=180692170#; EO_Bot_Ssid=1314062336; ABTEST=8|1750772343|v17'
        }

        try:
            # timeout prevents one stalled request from hanging the whole run
            with re.get(url=base_url, headers=headers, timeout=15) as response:
                if response.status_code == 200:
                    tree = html.fromstring(response.text)
                    # XPath string() always returns a str ('' when the
                    # #lemma_pv element is missing), never None.
                    pv_value = tree.xpath('string(//em[@id="lemma_pv"])')
                    if not pv_value:
                        print(f'{key}被识别,请求头继续.....')
                        continue
                    print(f"{key}: {pv_value}")
                    csv_writer.writerow([key, pv_value.strip()])
                else:
                    print(f"请求失败，状态码: {response.status_code}")
        except re.RequestException as exc:
            # A network failure on one entry must not abort the whole run.
            print(f"请求异常: {exc}")
            continue
        # Polite randomized pause between requests ('time' was imported for
        # this but never used in the original).
        time.sleep(random.uniform(1, 2))
