"""
网址
    https://jiameng.baidu.com/
方式：
    通过接口进行访问
"""
import csv
import json
import os
import re
import time

import requests

"""
爬取文件内容
"""
class JiaMeng():
    """Scraper for Baidu franchise listings (jiameng.baidu.com).

    Fetches paginated JSON search results for one industry category,
    de-duplicates projects by ``project_id`` and persists the merged
    records to '加盟星-<filename>.json'.
    """

    def __init__(self, category, filename=None):
        """
        :param category: industry category id used in the search API query.
        :param filename: suffix for the output json file.
        :raises ValueError: if ``category`` is falsy.  (The original code
            printed a message and returned, leaving the instance
            half-initialized and crashing later with AttributeError.)
        """
        if not category:
            raise ValueError('传入有效行业id')
        self.category = category
        self.filename = filename
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36"
        }

    def get_url(self, pages):
        """Build the search-API urls for pages 1..pages (inclusive)."""
        base = ("https://jiameng.baidu.com/portal/search?ajax=1&accessid=0107712167D4"
                "&device=pc&strategy=list&from=jmx&pageSize=20&page={}&category={}")
        return [base.format(pn, self.category) for pn in range(1, pages + 1)]

    def save_file(self, c):
        """Overwrite the json dump file with the record list ``c``."""
        with open('加盟星-{}.json'.format(self.filename), 'w', encoding='utf-8') as f:
            f.write(json.dumps(c, ensure_ascii=False))

    def open_file(self):
        """Return the raw json text previously saved, or None if unreadable.

        A missing file on the first run is expected; the caller treats
        None as "no prior data".
        """
        try:
            with open('加盟星-{}.json'.format(self.filename), 'r', encoding='utf-8') as f:
                return f.read()
        except OSError as e:
            print(e)
            return None

    def repeated_filter(self, c, file_c):
        """Return True if ``c``'s project_id already exists in ``file_c``."""
        for item in file_c:
            if c['project_id'] == item['project_id']:
                print('出现重复数据:', c)
                return True
        return False

    def requests_url(self, url):
        """GET ``url`` and return the decoded json payload, or None on failure."""
        try:
            # timeout added so a hung connection cannot stall the crawl forever
            res = requests.get(url, headers=self.headers, timeout=15).json()
        except Exception:
            print('未获取数据', url)
            return None
        if res:
            return res
        print('未获取数据', url)
        return None

    def get_page(self, res):
        """Compute the total page count (20 items per page) from a response.

        Bug fix: the original returned None when totalNum was an exact
        multiple of 20 (only the remainder branch returned), which made
        run() crash; ceiling division always yields the page count.
        """
        totalNum = res['data']['totalNum']
        pages = (totalNum + 19) // 20  # ceil(totalNum / 20) in integers
        print('当前总页数：', pages)
        return pages

    def parse(self, url):
        """Fetch one result page and merge new projects into the json dump."""
        res = self.requests_url(url)
        if res is None:
            # network/decode failure already logged by requests_url
            return
        code = res.get('code')
        if code != 0:
            print('获取数据失败状态码： ', code)
            return

        raw = self.open_file()
        file_content = json.loads(raw) if raw else []

        for data in res['data']['list']:
            area = ''
            try:
                for a in data['metas']['area']:
                    area += a['name'] + '、'
            except (KeyError, TypeError):
                # no regional info in the payload: treat as nationwide
                area = '全国、'

            content = {
                "project_id": data['project_id'],  # de-duplication key
                "name": data['name'],
                "company": data['company']['company_name'],
                "area": area,
            }
            if not self.repeated_filter(content, file_content):
                file_content.append(content)
        self.save_file(file_content)

    def requests_one(self):
        """Request page 1 and derive the total number of pages (0 on failure)."""
        url = ("https://jiameng.baidu.com/portal/search?ajax=1&accessid=0107712167D4"
               "&device=pc&strategy=list&from=jmx&pageSize=20&page=1&category={}").format(self.category)
        res = self.requests_url(url)
        if res is None:
            return 0
        return self.get_page(res)

    def run(self):
        """Crawl every result page for the category, throttled between pages."""
        pages = self.requests_one()
        for index, url in enumerate(self.get_url(pages), start=1):
            print('第{}页，爬取url：{}'.format(index, url))
            self.parse(url)
            time.sleep(2)  # be polite to the endpoint

"""
写入文件内容
"""
class SaveFile():
    """Export the scraped json dump to csv files.

    Writes every record to '<filename>全部.csv' and only records whose
    company name mentions 广州 or 佛山 to '广州佛山-<filename>.csv'.
    """

    def __init__(self, filename):
        """
        :param filename: suffix identifying the json dump and csv outputs.
        """
        self.filename = filename
        # csv restricted to companies located in 广州/佛山
        self.file = self._open_csv('广州佛山-{}.csv'.format(self.filename))
        self.csv_file = csv.writer(self.file)
        # csv of every scraped record
        self.file22 = self._open_csv('{}全部.csv'.format(self.filename))
        self.csv_file22 = csv.writer(self.file22)

    @staticmethod
    def _open_csv(path):
        """Open ``path`` for appending and return the file object.

        Bug fix: the header row is written only when the file is new or
        empty; the original wrote it unconditionally, so every re-run
        appended a duplicate header line.
        """
        is_empty = not os.path.exists(path) or os.path.getsize(path) == 0
        f = open(path, 'a', encoding='utf-8-sig', newline='')
        if is_empty:
            csv.writer(f).writerow(['公司名', '店铺名', '区域'])
        return f

    def close_file(self):
        """Close both csv file handles."""
        self.file.close()
        self.file22.close()

    def open_file(self):
        """Return the raw json dump text, or None if it cannot be read."""
        try:
            with open('加盟星-{}.json'.format(self.filename), 'r', encoding='utf-8') as f:
                return f.read()
        except OSError as e:
            print(e)
            return None

    def writer_file(self):
        """Load the json dump and write rows to both csv outputs."""
        raw = self.open_file()
        if not raw:
            return
        result = json.loads(raw)
        print(len(result))
        for i in result:
            row = [i['company'], i['name'], i['area']]
            # re.search suffices for a yes/no match (findall built a list)
            if re.search('广州|佛山', i['company']):
                print(i)
                self.csv_file.writerow(row)
            self.csv_file22.writerow(row)

    def run(self):
        """Write the csv outputs, always releasing the file handles."""
        try:
            self.writer_file()
        finally:
            # original leaked both handles if writer_file raised
            self.close_file()

if __name__ == '__main__':
    # (category_id, category_name, output_filename) entries; commented-out
    # rows are previously crawled categories kept for reference.
    rrr = [
        # (7,20906,'小吃'),
        # (13,20907, '中吃'),
        # (10,20908, '快餐'),
        # (8,20909, '饮品奶茶'),
        # (10,20904, '米粉面条'),
        # (6,20903, '西餐、料理'),
        # (11, 20910, '火锅'),
        # (20902, '麻辣烫'),
        # (20913, '面包甜品'),
        # (20905, '烧烤烤鱼'),
        # (20911, '串串香'),
        # (20912, '海鲜'),
        # (20901, '茶餐厅'),
        # (20914, '燕窝'),
        # (20915, '炸鸡、汉堡'),
        # (20916, '包点饺子'),
        # (20917, '汤类粥类'),
        # (20918, '轻食'),
        # (20919, '自助餐'),
        # (20920, '熟食卤味'),
        ##########################  apparel categories
        (20804, '品牌服饰', '服饰'),
        (20806, '女装', '服饰'),
        (20805, '男装', '服饰'),
        (20803, '鞋包配饰', '服饰'),
        (20809, '窗帘布艺', '服饰'),
        (20802, '皮具', '服饰'),
        (20808, '童装童鞋', '服饰'),
        (20810, '定制服装', '服饰'),
        (20811, '运动装', '服饰'),
    ]
    for r in rrr:
        # r[0] is the category id, r[-1] the shared output-file suffix
        jm = JiaMeng(r[0], filename=r[-1])
        jm.run()
    # Note: the original used for/else here; a loop with no break always
    # runs its else clause, so the export is simply sequential post-work.
    sf = SaveFile('服饰')
    sf.run()