'''
MySQL schema used by this spider -- run these statements once before crawling:

create database kfcdb charset utf8;
use kfcdb;
create table kfc_tab(
rownum varchar(200),
storeName varchar(200),
addressDetail varchar(200),
pro varchar(200),
provinceName varchar(200),
cityName varchar(200)
)charset=utf8;
'''
import requests
import time
import random
from fake_useragent import UserAgent
from lxml import etree
import pymysql

class KfcSpider:
    """Crawl KFC (China) store listings for every city and store them in MySQL.

    Workflow: fetch the city list from the store-locator page, then page
    through the JSON store-list API per city and insert each store row
    into the ``kfc_tab`` table (schema at the top of this file).
    """

    def __init__(self):
        # JSON API endpoint returning paginated store lists for a city (POST).
        self.post_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname'
        # HTML page containing the list of all city names (GET).
        self.get_url = 'http://www.kfc.com.cn/kfccda/storelist/index.aspx'
        # Connect to MySQL. Keyword arguments are required: PyMySQL >= 1.0
        # removed support for positional connect() arguments.
        self.db = pymysql.connect(
            host='localhost',
            user='root',
            password='123456',
            database='kfcdb',
            charset='utf8',
        )
        self.cur = self.db.cursor()
        # Parameterized INSERT -- values are escaped by the driver, never
        # interpolated into the SQL string by hand.
        self.ins = 'insert into kfc_tab values(%s,%s,%s,%s,%s,%s)'

    def get_headers(self):
        """Return request headers with a random User-Agent string."""
        # fake_useragent is loaded from a local JSON cache so no network
        # round-trip is needed to pick a UA.
        return {"User-Agent": UserAgent(path="fake_useragent.json").random}

    def get_total_page(self, city):
        """Return the number of result pages (10 stores per page) for *city*."""
        headers = self.get_headers()
        data = {
            'cname': city,
            'pid': '',
            'pageIndex': '1',
            'pageSize': '10',
        }
        html = requests.post(url=self.post_url, data=data, headers=headers).json()
        # A city with no stores yields an empty 'Table'; report zero pages
        # instead of raising IndexError.
        table = html.get('Table') or []
        if not table:
            return 0
        # Total number of stores for this city.
        count = table[0]['rowcount']
        # Ceiling division: partial final page still counts as a page.
        return (count + 9) // 10

    def get_city_list(self):
        """Return the list of all city names scraped from the store-locator page."""
        headers = self.get_headers()
        html = requests.get(url=self.get_url, headers=headers).text
        # Parse the HTML and extract each city link's text.
        eobj = etree.HTML(html)
        return eobj.xpath('//div[@class="shen_city"]/a/text()')

    def parse_html(self):
        """Iterate every city and page, inserting all store rows into MySQL."""
        headers = self.get_headers()
        # All cities nationwide, e.g. ['北京', '安庆', '天津', ...].
        city_list = self.get_city_list()
        for city in city_list:
            total_page = self.get_total_page(city)
            for page in range(1, total_page + 1):
                data = {
                    'cname': city,
                    'pid': '',
                    'pageIndex': str(page),
                    'pageSize': '10',
                }
                html = requests.post(url=self.post_url, data=data, headers=headers).json()
                # Collect the whole page, then insert in one batch and
                # commit once -- far fewer round-trips than per-row commits.
                rows = []
                for one_shop_dict in html['Table1']:
                    print(one_shop_dict)
                    rows.append([
                        one_shop_dict['rownum'],
                        one_shop_dict['storeName'],
                        one_shop_dict['addressDetail'],
                        one_shop_dict['pro'],
                        one_shop_dict['provinceName'],
                        one_shop_dict['cityName'],
                    ])
                if rows:
                    self.cur.executemany(self.ins, rows)
                    self.db.commit()

                # Throttle requests to avoid hammering the server.
                time.sleep(random.uniform(0, 1))

    def crawl(self):
        """Run the full crawl, then release the database resources."""
        self.parse_html()
        # Close cursor before connection, per DB-API convention.
        self.cur.close()
        self.db.close()

if __name__ == '__main__':
    # Entry point: build the spider and run the complete crawl.
    KfcSpider().crawl()














