# -*- coding: utf-8 -*-
# author : li shi jie
# Email : yr785339493@qq.com
import requests
import urllib3
import re
import os
urllib3.disable_warnings()
from multiprocessing import Pool
from collections import Counter
import json

class Spider_yse:
    """Scraper for dianping.com category listing pages.

    Fetches one listing page, extracts the detail-page URLs of each shop,
    and fans detail-page requests out over a small process pool.
    """

    def __init__(self, start_url):
        # Browser-like headers; dianping blocks obviously non-browser clients.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        }
        # URL of the category listing page this instance scrapes.
        self.base_url = start_url

    def parse(self):
        """Fetch the listing page and return the list of detail-page URLs.

        Returns an empty list when the shop-list container cannot be found
        (layout change or an anti-bot page), instead of raising IndexError.
        """
        response = requests.get(self.base_url, headers=self.headers, verify=False)
        res_ul = re.compile(r'shop-list J_shop-list shop-all-list".*?>(.*?)</ul>', re.S).findall(response.text)
        if not res_ul:
            # No shop-list <ul> matched: likely a captcha/anti-bot page or a
            # markup change. Returning [] also stops the caller's pagination
            # loop (which continues only while a page yields exactly 15 items).
            return []
        info_urls = re.compile(r'class="pic".*?>.*?<a.*?href="(.*?)".*?>', re.S).findall(res_ul[0])
        return info_urls

    def info_parse(self, url):
        """Fetch one shop's detail page via the mobile site and print the HTTP status.

        The mobile host ('m.dianping.com') is used because the desktop detail
        page obfuscates content; the session cookie below is required for it
        to respond. NOTE(review): this cookie is hard-coded and will expire —
        expect non-200 statuses once it does.
        """
        self.headers['Cookie'] = 's_ViewType=10; _lxsdk_cuid=16762b59ed2c8-0f8ebba27ce504-6313363-1fa400-16762b59ed3c8; _lxsdk=16762b59ed2c8-0f8ebba27ce504-6313363-1fa400-16762b59ed3c8; _hc.v=04072e44-e58a-bb4a-814a-95ddacf09eee.1543549329; __utma=1.682613202.1543552614.1543552614.1543552614.1; __utmc=1; __utmz=1.1543552614.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmb=1.3.10.1543552614; _lxsdk_s=16762e2060c-2e1-39d-400%7C%7C175'
        phone_url = url.replace('www', 'm')
        res_info = requests.get(phone_url, headers=self.headers, verify=False)
        print(res_info.status_code)

    def run(self, res_sub):
        """Fetch every detail-page URL in *res_sub* using a 2-process pool.

        Blocks until all detail pages have been processed.
        """
        p = Pool(2)
        for url in res_sub:
            print(url)
            p.apply_async(self.info_parse, args=(url,))
        p.close()
        p.join()

if __name__ == '__main__':
    # Start at page 1 of the Shijiazhuang food category.
    start_url = 'http://www.dianping.com/shijiazhuang/ch10/p1'
    spider = Spider_yse(start_url)
    # res_sub holds the detail-page URLs extracted from the listing page.
    res_sub = spider.parse()
    spider.run(res_sub)
    pn = 2
    # Each listing page holds 15 items. A full page (exactly 15) implies a
    # next page exists; fewer than 15 means we've reached the last page.
    while len(res_sub) == 15:
        current_url = start_url.replace('p1', 'p' + str(pn))
        # Bug fix: the message "当前是第%s页" ("currently on page %s") was
        # formatted with the URL instead of the page number.
        print('当前是第%s页' % pn)
        spider = Spider_yse(current_url)
        res_sub = spider.parse()
        spider.run(res_sub)
        pn += 1



















