import requests  # 请求
import logging  # 日志
import pymysql  # 存储
from urllib.parse import urljoin  # 合并url
from parsel import Selector
from retrying import retry

# --- logging configuration -------------------------------------------------
logging_filename = 'request_douban_movies_logging.txt'
# Fix: the filename was defined but never handed to basicConfig, so logs
# went to stderr instead of the intended log file.
logging.basicConfig(filename=logging_filename,
                    level=logging.INFO,
                    format='%(asctime)s-%(levelname)s:%(message)s')

# --- crawl targets ---------------------------------------------------------
url = 'https://antispider5.scrape.center/page/{page}'       # list-page URL template
detail_url_root = 'https://antispider5.scrape.center'  # base for relative detail links
total_page = 10  # number of list pages to crawl
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'}  # request headers

# --- storage ---------------------------------------------------------------
# NOTE(security): database credentials are hard-coded in source; move them to
# environment variables or a config file outside version control.
db = pymysql.connect(host='cq13292957303.mysql.rds.aliyuncs.com', user='qianqian',
                     password='Chenqian1234', database='test1')
cursor = db.cursor()

# --- proxy settings --------------------------------------------------------
# NOTE(security): proxy credentials are hard-coded as well; externalize them.
proxy_host = 'tps736.kdlapi.com'
proxy_port = '15818'
proxy_username = 't15018766953329'
proxy_password = '6zblyilp'
# Authenticated proxy URL used for both plain and TLS traffic.
proxy = f'http://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}'
proxies = {
    'http': proxy,
    'https': proxy
}


class Spider():
    """Scraper for antispider5.scrape.center.

    Walks every list page, follows each movie's detail link, extracts
    title / rating / synopsis, and upserts the rows into MySQL via the
    module-level connection.
    """

    def Common_html(self, url):  # shared request helper
        """Fetch *url* through the configured proxy and return the body text.

        Raises requests.HTTPError on non-2xx responses and requests.Timeout
        on a stalled connection, so the @retry decorators on the callers can
        actually retry anti-spider bans and dead proxies. Without the
        timeout and status check the original call could hang forever and
        silently parse a ban page.
        """
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()  # turn bans (e.g. 403) into retriable exceptions
        logging.info('scrap url:%s', url)  # log each fetched URL
        return r.text

    @retry(stop_max_attempt_number=5)
    def Index_html(self, page):  # list-page request (up to 5 attempts)
        """Fetch list page *page* and return the detail-page hrefs on it."""
        print('尝试中')
        index_url = url.format(page=page)
        html = self.Common_html(index_url)
        return self.Detail_urls(html)

    def Detail_urls(self, html):  # list-page parsing
        """Extract the relative detail-page URLs from a list-page body."""
        select = Selector(text=html)
        return select.css('.name::attr(href)').getall()

    @retry(stop_max_attempt_number=5)
    def Detail_html(self, url):  # detail-page request (up to 5 attempts)
        """Fetch one detail page, parse it, and persist the result."""
        html = self.Common_html(url)
        data = self.Detail_data(html)
        self.Data_save(data)

    def Detail_data(self, html):  # detail-page parsing
        """Parse a detail page into [title, rating, synopsis].

        A missed selector returns None; the ``or ''`` guard keeps .strip()
        from raising AttributeError and yields an empty field instead.
        """
        select = Selector(text=html)
        title = (select.css('.m-b-sm::text').get() or '').strip()
        rating_num = (select.css('[class~="score"]::text').get() or '').strip()
        brif = (select.xpath(
            '//h3[contains(text(),"剧情简介")]/following-sibling::p/text()').get() or '').strip()
        data = [title, rating_num, brif]
        logging.info('get data::%s', data)  # lazy %-formatting for logging
        return data

    def Data_save(self, data):  # data persistence
        """Upsert one movie row (insert, or update on duplicate key).

        The three parameters are bound twice: once for the INSERT values
        and once for the UPDATE clause, hence ``data * 2``.
        """
        sql = 'insert into movies(title,rating_num,brif) values (%s,%s,%s) on duplicate key update title=%s,rating_num=%s,brif=%s'
        cursor.execute(sql, data * 2)
        db.commit()

    def run(self):  # orchestration
        """Crawl all list pages and every detail page they link to.

        try/finally ensures the DB connection is closed even when a page
        keeps failing after all retries (the original leaked it on error).
        """
        try:
            for page in range(1, total_page + 1):  # outer loop: list pages
                detail_urls = self.Index_html(page)
                for detail_url in detail_urls:  # inner loop: items per page
                    self.Detail_html(urljoin(detail_url_root, detail_url))
        finally:
            db.close()  # always release the DB connection


if __name__ == '__main__':
    # Script entry point: build the crawler and kick off the full crawl.
    Spider().run()
