import json
import logging
import re

import pymysql as mysql
import scrapy

import crawler.db as db
import crawler.ippool as ippool
from crawler.items import *


class UserSpider(scrapy.Spider):
    """Crawl qyer.com profile pages for tourists that still lack profile data.

    Reads batches of users from the `tourist_no_profile` DB view, requests
    each user's profile page, and yields ``TouristItem`` objects with the
    gender/birthday fields it can extract. The crawl position (``start``) is
    persisted to a JSON config file so a restart resumes where it left off.
    """
    name = 'UserSpider'
    allowed_domains = ['place.qyer.com']
    headers = {
        'referer': 'https://place.qyer.com/shanghai/alltravel/'
    }
    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.UserProfilePipeline': 300}
    }

    # Number of rows fetched from the database per batch.
    size = 16
    # Size of the batch currently being crawled.
    batch_size = 0
    # 1-based offset of the next database read.
    start = 1
    # How many requests of the current batch have been processed.
    current = 1

    def __init__(self, *args, **kwargs):
        # Forward Scrapy's arguments so the base class initializes properly.
        super().__init__(*args, **kwargs)
        self.config = {}
        # Open the database (project-level wrapper around pymysql).
        self.db = db.connect('tour')
        self.opened()
        # Compiled once; strips all whitespace from scraped text.
        self.pattern = re.compile(r'\s+')

    def _config_path(self) -> str:
        """Return the path of this spider's JSON config file."""
        from os import path
        return f'{path.dirname(__file__)}/config/{self.name}.json'

    def save_config(self):
        """Persist the current crawl position so a restart can resume."""
        self.config['start'] = self.start
        with open(self._config_path(), 'w', encoding='utf-8') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    def opened(self):
        """Run once at spider start: load the config and prepare the database."""
        try:
            # The config file may not exist on the very first run, so the
            # open itself must be inside the try as well.
            with open(self._config_path(), 'r', encoding='utf-8') as f:
                config = json.load(f)
            logging.info(f"爬虫:{self.name} 加载配置文件成功!")
        except (OSError, ValueError):
            # OSError: file missing/unreadable; ValueError: malformed JSON.
            config = {}
            logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        self.start = config.get('start', 1)
        logging.info("启动配置信息:")
        logging.info(f"start={self.start}")
        self.config = config
        # Initialize the database view and log remaining work.
        self.init_db()

    def closed(self, reason):
        """Run once at spider shutdown: release resources and save state."""
        ippool.close()
        self.save_config()
        self.db.close()

    @staticmethod
    def get_url(user):
        """Build the profile-page URL for a user row dict."""
        return f"https://www.qyer.com/u/{user['tourist_id']}/profile"

    def init_db(self):
        """Create the `tourist_no_profile` view (idempotent) and log the backlog."""
        sql = "create view `tourist_no_profile`\
         as select `tourist_id`, `name` from `tour`.`tourist`\
         where `tourist`.`_update` =0;"
        cursor = self.db.cursor
        try:
            cursor.execute(sql)
        except mysql.err.OperationalError as e:
            # 1050 = "table already exists": the view was created on a
            # previous run. Anything else is a real error.
            if e.args[0] != 1050:
                raise
        # Count how many users still need their profile crawled.
        sql = "select count(*) from `tour`.`tourist_no_profile`"
        cursor.execute(sql)
        num = cursor.fetchone()[0]
        logging.info(f"剩余用户数: {num}")

    def next_batch(self):
        """Fetch the next batch of user rows, wrapping around at table end.

        Returns the row tuple from the DB, or None when the view is empty.
        Advances ``start`` and persists it after a successful read.
        """
        cursor = self.db.cursor
        # Parameterized query; `start` is 1-based while the LIMIT offset
        # is 0-based, hence the -1.
        sql = "select * from `tourist_no_profile` limit %s, %s;"
        cursor.execute(sql, (self.start - 1, self.size))
        rows = cursor.fetchall()
        length = len(rows)
        self.batch_size = length
        self.current = 1
        if length == 0:
            if self.start == 1:
                logging.info(f'没有数据')
                return None
            # Reached the end of the table: wrap around to the beginning.
            logging.info(f'数据库进行循环查询，start={self.start}')
            self.start = 1
            return self.next_batch()
        self.start = self.start + length
        self.save_config()
        return rows

    def start_requests(self):
        """Yield one profile request per user in the next batch."""
        self.current = 1
        batch = self.next_batch()
        if not batch:
            return
        for each in batch:
            # Build a fresh dict per request so each request's meta holds
            # its own values instead of sharing one mutated dict.
            user = {
                'tourist_id': each[0],
                'name': each[1]
            }
            yield scrapy.Request(url=self.get_url(user), headers=self.headers,
                                 meta=user, dont_filter=True)
        logging.info(f"正在爬取:start={self.start - 1}, batch_size={self.batch_size}")

    @staticmethod
    def format_user(user) -> str:
        """Render a user dict for log messages."""
        return f"name={user.get('name', '')}, tourist_id={user.get('tourist_id', 0)}"

    def parse(self, response):
        """Extract gender and birthday from a user's profile page.

        Also drives the batch bookkeeping: when the current batch is done,
        schedules the next batch of requests.
        """
        request = response.request
        user = {
            'tourist_id': request.meta.get('tourist_id', 0),
            'name': request.meta.get('name', '')
        }
        if request.url == '':
            # Fix: format_user requires the user dict (was called with no args).
            yield from self.failed(f"请求错误, {self.format_user(user)}")
            return

        logging.info(f"当前batch: {self.current} / {self.batch_size}")
        if self.current >= self.batch_size:
            # Current batch finished — kick off the next one.
            yield from self.start_requests()
        else:
            self.current = self.current + 1

        pattern = self.pattern
        li = None  # pre-bind so the error log below can't hit an unbound name
        try:
            li = response.css('.clearfix.fontArial li')
            title = li.css(".left::text").extract()
            value = []
            for i, t in enumerate(title):
                title[i] = pattern.sub('', t)
                v = li[i].css('.right::text').get() or ''
                value.append(pattern.sub('', v))
        except Exception as e:
            logging.error(f"解析response数据错误,{self.format_user(user)}\n li={li}  \nerror={repr(e)}")
            return

        item = TouristItem()
        item['id'] = user['tourist_id']
        try:
            i = title.index('性别：')
            gender = value[i]
            if gender != '':
                item['gender'] = gender
        except ValueError:
            # Gender field not present on this page.
            pass
        try:
            i = title.index('生日：')
            birthday = value[i]
            if birthday != '' and birthday != '0000年00月00日':
                # Normalize "YYYY年MM月DD日" into "YYYY-MM-DD".
                birthday = birthday.replace('年', '-').replace('月', '-').replace('日', '')
                item['birthday'] = birthday
        except ValueError:
            # Birthday field not present on this page.
            pass
        yield item

    def failed(self, msg: str):
        """Log a failed request and advance the batch bookkeeping."""
        logging.info(f"用户信息爬取失败, msg={msg}")
        if self.current >= self.batch_size:
            yield from self.start_requests()
        else:
            self.current = self.current + 1


if __name__ == '__main__':
    from scrapy import cmdline
    from os import path

    # Derive the spider name from this file's name. splitext drops only the
    # final extension, so filenames containing extra dots are handled
    # correctly (split('.')[0] would truncate at the first dot).
    spider = path.splitext(path.basename(__file__))[0]
    cmdline.execute(['scrapy', 'crawl', spider])
