from urllib.parse import urljoin

import pymysql
from loguru import logger
from parsel import Selector
from requests import Session
from requests.exceptions import RequestException

from MovieRequest import MovieRequest
from config import *
from redisQueto import Queto


class Spider():
    """Movie spider.

    Pops requests from a Redis-backed queue, downloads pages through an
    authenticated HTTP proxy, parses list/detail pages with parsel, and
    upserts results into a MySQL `movies` table. The queue, session, DB
    connection and proxy settings are shared class-level resources;
    configuration names (headers, host, user, password, database, proxy_*,
    timeout, url, detail_url) come from `from config import *`.
    """

    redisQueto = Queto()  # Redis-backed request queue
    s = Session()  # shared session: keeps headers/cookies across requests
    s.headers.update(headers)
    # MySQL connection + cursor for persisting scraped data
    db = pymysql.connect(host=host, user=user,
                         password=password, database=database)
    cursor = db.cursor()
    proxy = f'http://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}'
    proxies = {  # route both schemes through the authenticated proxy
        'http': proxy,
        'https': proxy
    }
    # Counter of saved records. BUG FIX: the original used `global num`
    # without ever initialising `num`, which raised NameError on first save.
    num = 0

    def download(self, request):
        """Download component: send `request` through the shared session.

        Returns the Response, or None on any network error so that
        `engine` can route the request to `err_deal` for retry.
        """
        print(request.url)
        # prepare_request merges session state (headers/cookies) into the
        # request, yielding a stateful PreparedRequest.
        prepped = self.s.prepare_request(request)
        try:
            # BUG FIX: was `self.timeout`, which does not exist on the class
            # (AttributeError); `timeout` is a module-level config value.
            return self.s.send(prepped, proxies=self.proxies, timeout=timeout)
        except RequestException as e:
            # BUG FIX: network errors used to propagate and kill the engine
            # loop even though `engine` already checks for a falsy response.
            logger.debug(f'download of {request.url} raised {e!r}')
            return None

    def Data_save(self, data):
        """Persist one [title, rating_num, brif] row; upsert on duplicate key.

        :param data: list of three strings parsed by Detail_parse.
        """
        Spider.num += 1
        print(f'{Spider.num}:{data}')
        sql = ('insert into movies(title,rating_num,brif) values (%s,%s,%s) '
               'on duplicate key update title=%s,rating_num=%s,brif=%s')
        try:
            # The 3 values appear twice: once for INSERT, once for UPDATE.
            self.cursor.execute(sql, data * 2)
            self.db.commit()
        except pymysql.MySQLError as e:
            # BUG FIX: the original bare `except` CLOSED the cursor and the
            # connection, making every subsequent save fail silently.
            # Roll back this row and keep the connection alive.
            logger.error(f'save failed for {data}: {e!r}')
            self.db.rollback()

    def err_deal(self, request):
        """Error handling: re-queue a failed request for up to 3 retries."""
        request.fail_time += 1
        logger.debug(
            f'request of {request.url} has failed,fail_time is {request.fail_time}')
        if request.fail_time <= 3:
            self.redisQueto.add(request)

    def index_parse(self, res):
        """List-page parser: yield a MovieRequest per detail-page link."""
        select = Selector(text=res.text)
        for href in select.css('.name::attr(href)').getall():
            yield MovieRequest(url=urljoin(detail_url, href),
                               callback=self.Detail_parse)

    def Detail_parse(self, res):
        """Detail-page parser: yield one [title, rating_num, brif] list.

        NOTE(review): each `.get()` may return None if the page layout
        changes, which would raise AttributeError on `.strip()` — the
        request would then NOT be retried; confirm this is acceptable.
        """
        select = Selector(text=res.text)
        title = select.css('.m-b-sm::text').get().strip()
        rating_num = select.css('[class~="score"]::text').get().strip()
        brif = select.xpath(
            '//h3[contains(text(),"剧情简介")]/following-sibling::p/text()').get().strip()
        yield [title, rating_num, brif]

    def start_url(self, url):
        """Seed the queue with an initial list-page request."""
        self.redisQueto.add(MovieRequest(url=url, callback=self.index_parse))

    def engine(self):
        """Main loop: pop requests, download, dispatch parser results.

        Parser results are either new MovieRequests (re-queued) or data
        lists (saved to MySQL).
        """
        while not self.redisQueto.empty():
            request = self.redisQueto.pop()
            callback = request.callback
            res = self.download(request)
            if not res or res.status_code != 200:
                self.err_deal(request)
                # BUG FIX: the original fell through and parsed the failed
                # (possibly None) response, crashing on `res.text`.
                continue
            for result in callback(res):
                if isinstance(result, MovieRequest):
                    self.redisQueto.add(result)
                elif isinstance(result, list):
                    self.Data_save(result)

    def run(self):
        """Seed pages 1-10, drain the queue, then release all resources."""
        for page in range(1, 11):
            self.start_url(url=url.format(page=page))
        try:
            self.engine()
        finally:
            # Release resources even if the crawl aborts mid-way.
            self.cursor.close()
            self.db.close()  # close the MySQL connection
            self.redisQueto.clear()  # drop the Redis request cache


if __name__ == '__main__':
    # Script entry point: build a spider and run the full crawl.
    Spider().run()
