# coding:utf8
import requests
import re
from lxml import etree
import random
from tqdm import tqdm
import parsel
import csv

# Open the output file for the scraped shop records.
# Append mode so repeated runs add rows; newline='' is required by the csv
# module on Windows to avoid blank lines between rows.
# NOTE: if the resulting CSV shows garbled Chinese in some spreadsheet
# software, switch encoding to 'utf-8-sig' (adds a BOM that Excel expects).
f = open('../项目1/shop_info.csv', mode='a', encoding='utf-8', newline='')

# DictWriter: rows are written as dicts keyed by these field names.
# `fieldnames` is also the header row emitted by writeheader() below.
csv_writer = csv.DictWriter(f, fieldnames=[
    'shop_id',
    'shop_name',
])
# Write the header row once at startup.
# NOTE(review): in append mode a re-run writes a second header line into
# the middle of the file — confirm whether that is intended.
csv_writer.writeheader()


# 发起请求
def post_request(url=None):
    """Fetch one dianping.com listing page and append each shop's id and
    name to the module-level ``csv_writer``.

    Despite the name, this performs an HTTP GET (name kept for caller
    compatibility).

    :param url: full listing-page URL (e.g. '.../shanghai/ch95/p1');
        required in practice even though it defaults to None.
    :raises requests.HTTPError: if the server responds with an error status
        (e.g. the anti-bot verification page returning 403).
    """
    # dianping rejects requests without a browser User-Agent and a
    # logged-in Cookie, so both headers are mandatory.
    headers = {
        'Connection': 'keep-alive',
        'Host': 'www.dianping.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
        'Cookie': '_lxsdk_cuid=181aa7ad838c8-03fafa0ef11406-26021a51-154ac4-181aa7ad838c8; _lxsdk=181aa7ad838c8-03fafa0ef11406-26021a51-154ac4-181aa7ad838c8; _hc.v=adcb4794-bc69-85a6-0f7b-86ad1b044c0a.1657019586; fspop=test; cy=1; cye=shanghai; s_ViewType=10; _lx_utm=utm_source=google&utm_medium=organic; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1657019586,1657895159,1657933584; WEBDFPID=z8207yu903y65126zw8z63z35979y5u1817951y18u297958wz1620u1-1658020027549-1657933626051WSGGCKIfd79fef3d01d5e9aadc18ccd4d0c95071766; dplet=a0db352a6fab9edb296e26e865709e07; dper=35e3a1c591e245be209e4ebd5830e3c49fcb03376b40fecf27d0ce3f14f6114c6d14497b189be3b630778ceb8c6476f7484264f595cab1e501c2dfbf06f8ecc88c7ca64b10e7af7ad1cebc79916465ec348e8866d81bb47916e0b1bfd8804c23; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_1653041944; ctu=fc657a6403b5cbc5468e1a6a9c752e264ce8a1b0ee18f7e06f8e9e92269a49ed; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1657934250; _lxsdk_s=182048ad3fb-250-c72-2c||147'
    }

    r = requests.get(url=url, headers=headers)
    # Fail loudly on HTTP errors instead of silently parsing an error page.
    r.raise_for_status()

    # Round-trip through GBK with errors ignored: drops any character GBK
    # cannot represent (original workaround for a decode error — kept as-is).
    text = r.text.encode("gbk", "ignore").decode("gbk", "ignore")

    select = parsel.Selector(text)

    # Each listing page carries up to 15 shop entries. The original code
    # used `.extract()[0]`, which raises IndexError on a short last page or
    # an anti-bot placeholder; `.get()` returns None and we skip instead.
    for num in range(1, 16):
        item = select.xpath(f'//*[@id="shop-all-list"]/ul/li[{num}]/div[2]/div[1]/a[1]')
        shop_id = item.xpath('./@data-shopid').get()
        shop_name = item.xpath('./@title').get()
        if shop_id is None or shop_name is None:
            # Entry missing — nothing usable in this <li>; move on.
            continue

        # One row per shop, written immediately so partial runs still
        # produce output.
        csv_writer.writerow({
            'shop_id': shop_id,
            'shop_name': shop_name,
        })


if __name__ == '__main__':
    # Listing pages for the Shanghai ch95 channel live at .../p1 through /p6.
    base_url = 'https://www.dianping.com/shanghai/ch95/p'

    # tqdm wraps the page range to show a progress bar while scraping.
    for idx in tqdm(range(1, 7)):
        post_request(url=f'{base_url}{idx}')

    print('写入完成！')

