"""
靶场练习
https://antispider5.scrape.center/page/1
"""
import re
from typing import Generator

import pandas as pd
import requests
from lxml import etree

from day10.proxy_spider import ProxySpider
from xunter_utils.xunter_requests import ChromeClient


class MovieItem:
    """Container for one scraped movie record (title + description)."""

    title: str    # movie title text
    content: str  # plot/description paragraph

    def __init__(self, title: str = '', content: str = ''):
        """
        Initialize the item. Annotations alone do not create instance
        attributes, so without this, reading ``item.title`` before assigning
        it raised AttributeError.

        :param title: movie title (defaults to empty string)
        :param content: movie description (defaults to empty string)
        """
        self.title = title
        self.content = content

    def __repr__(self) -> str:
        return f'{type(self).__name__}(title={self.title!r}, content={self.content!r})'

class MovieExcelPipeLine:
    """Pipeline that buffers scraped items and flushes them to an Excel file."""

    def __init__(self, file_path: str):
        """
        Initialize the pipeline.

        :param file_path: path of the Excel file to write on close
        """
        # Writer object used to persist the buffered rows when the spider ends.
        self.writer = pd.ExcelWriter(file_path)
        # In-memory row buffer; one dict per processed item.
        self.data = []

    def process_item(self, item):
        """
        Buffer one scraped item as a row dict.

        :param item: object exposing ``title`` and ``content`` attributes
        :return: the same item, unchanged (pipeline chaining convention)
        """
        self.data.append({
            '标题': item.title,
            '内容': item.content,
        })
        return item

    def close_spider(self):
        """
        Write all buffered rows to the Excel file and close the writer.

        The writer is closed in ``finally`` so the file handle is not
        leaked if the DataFrame write raises.
        """
        try:
            df = pd.DataFrame(self.data)
            df.to_excel(self.writer, index=False)
        finally:
            self.writer.close()

class Antispider5Spider(ChromeClient):
    """Spider for the antispider5 practice site: listing pages and movie detail pages."""

    host = 'https://antispider5.scrape.center'

    def parse(self, response: requests.Response) -> Generator:
        """
        Parse a listing page and yield the relative URL of each movie's
        detail page.

        :param response: HTTP response of a listing page
        :return: generator of detail-page hrefs
        """
        tree = etree.HTML(response.text)
        # contains() gives a fuzzy class match; an exact class string is brittle
        movie_cards = tree.xpath('//div[contains(@class,"m-t is-hover-shadow")]')
        for movie_card in movie_cards:
            detail_href = movie_card.xpath('.//a[@class="name"]/@href')[0]
            yield detail_href

    def parse_detail(self, response: requests.Response) -> Generator:
        """
        Parse a movie detail page and yield a populated MovieItem.

        :param response: HTTP response of a detail page
        :return: generator yielding a single MovieItem
        """
        tree = etree.HTML(response.text)
        detail_block = tree.xpath('//div[@class="item el-row"]')[0]

        movie = MovieItem()
        movie.title = detail_block.xpath('.//h2[@class="m-b-sm"]/text()')[0]
        movie.content = detail_block.xpath('.//div[@class="drama"]/p/text()')[0].strip()
        yield movie

class XiaohuanSpider(ChromeClient, ProxySpider):
    """Spider that harvests free proxy servers from ip.ihuan.me."""

    host = 'https://ip.ihuan.me/'
    # NOTE(review): this fires a network request at class-definition (import)
    # time, which is fragile offline — consider computing it lazily. Kept
    # as-is to preserve the existing class-attribute contract.
    origin_ip = requests.get('http://httpbin.org/get').json()['origin']

    def page_parse(self, response: requests.Response) -> str:
        """
        Extract the "next page" route from the pagination bar.

        :param response: HTTP response of a proxy listing page
        :return: next-page href with its first 6 characters stripped
        """
        html = etree.HTML(response.text)
        # li[8] is the "next" entry of the pagination list on this site
        page_ip = html.xpath('//ul[@class="pagination"]/li[8]/a/@href')[0]
        print(page_ip[6:])  # debug: show the derived route
        return page_ip[6:]

    def get_ip(self, response: requests.Response) -> int:
        """
        Scrape proxy ip:port pairs from the page, keep the ones that pass
        the liveness check, and register them in ``self.proxies``.

        :param response: HTTP response of a proxy listing page
        :return: number of proxies that passed the check
        """
        rows = re.findall(r'<img src=(.*?)<a href=', response.text)
        count = 0

        for row in rows:
            # parts[0] = IPv4 address (after the flag svg), parts[1] = port (<td>)
            parts = re.findall(r'(?<=svg">)\d{1,3}(?:\.\d{1,3}){3}|(?<=<td>)\d+', row)
            ip = '%s:%s' % (parts[0], parts[1])
            # BUGFIX: previously referenced the module-level global `client`
            # instead of `self`, so this method broke for any instance not
            # bound to that exact name.
            if self.check_ip(ip):
                self.proxies[ip] = ['www.xiaohuan.download']
                count += 1
                print(ip, self.proxies[ip])

        return count



if __name__ == '__main__':
    # Exercise 1 (antispider5 movie scraping), kept for reference:
    # client = Antispider5Spider('proxy_ips')
    # # print(client.proxies,len(client.proxies))
    # pipline = MovieExcelPipeLine(r'./movies.xlsx')
    #
    # for page in range(1, 11):
    #     res = client.send_request_proxy(f'{client.host}/page/{page}')
    #     print(1)
    #     for detail_url in client.parse(res):
    #         res = client.send_request_proxy(client.host + detail_url)
    #         print(2)
    #         for i in client.parse_detail(res):
    #             print(i.title)
    #             pipline.process_item(i)
    #
    # pipline.close_spider()

    # Exercise 2: harvest free proxies from ip.ihuan.me across several pages.
    client = XiaohuanSpider(proxy_dir='proxy_ips')
    res = client.send_request_proxy('https://ip.ihuan.me/')
    for page in range(1, 5):
        # Collect proxies from the current page, then resolve the next page
        # route and fetch it for the following iteration.
        client.get_ip(res)
        page_url = client.page_parse(res)
        res = client.send_request_proxy(client.host, params={
            'page': page_url,
        })
        print(f'{client.host}{page_url}')
    # Persist everything collected to disk. (Was an f-string with no
    # placeholders — ruff F541.)
    client.save_ip_json('xiaohuan_ip_page.json')