import shutil
import requests
from lxml import etree
import re
from spider_1 import SpiderI
from concurrent.futures import ThreadPoolExecutor
import threading
import os

'''
Search-result pagination object
'''
class PagePaging:
    """Paginated search against flingtrainer.com.

    Fetches the first results page for a query, extracts the total hit
    count from the page heading, derives the number of result pages
    (15 hits per page) and builds the list of page URLs to crawl,
    capped at the first 20 pages.
    """

    def __init__(self, search_request):
        self.search_request = search_request                          # search query string
        self.search_HTML = self.request_HTML()                        # Response for results page 1
        self.data_sum = self.request_datasum()                        # total number of search hits
        self.page_sum = self.request_pagesumn(self.data_sum)          # total number of result pages
        self.start_urls = self.starturls()                            # page URLs to request (max 20)

    def request_HTML(self):
        """Fetch the first search-results page and return the raw Response."""
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
        }
        url = f'https://flingtrainer.com/page/1/?s={self.search_request}'
        # timeout keeps a stalled connection from hanging the whole crawl
        response = requests.get(url, headers=self.headers, timeout=10)
        return response

    def request_datasum(self):
        """Parse the total hit count out of the '<N> Search results' heading."""
        tree = etree.HTML(self.search_HTML.text)
        heading = tree.xpath('//div[@class="pad group"]/h1/text()')[1].replace('\t', '').replace('\n', '')
        return int(re.findall('(.*?) Search results', heading)[0])

    def request_pagesumn(self, data_sum):
        """Return the number of result pages: 15 hits per page, rounded up."""
        return (data_sum + 14) // 15

    def starturls(self):
        """Build one URL per result page, crawling at most the first 20 pages."""
        urls = [
            f'https://flingtrainer.com/page/{i}/?s={self.search_request}'
            for i in range(1, self.page_sum + 1)
        ]
        return urls[:20]


# 发起请求对象
# Request-dispatch object
class RequestResponse(PagePaging):
    """Fans the paginated search URLs out to a thread pool and collects
    one SpiderI instance per result page into ``self.spiders``."""

    def __init__(self, search_request):
        super().__init__(search_request)              # builds hit count, page count, start URLs
        self.spiders = []                             # collected SpiderI instances
        self.lock = threading.Lock()                  # guards self.spiders across worker threads

        # start the crawl immediately
        self.aaa()

    def createspider(self, html=None, url=None):
        """Build one SpiderI — from an already-fetched response when *html*
        is given, otherwise from *url* — and append it to the shared list
        under the lock."""
        if html is not None:
            spider = SpiderI(html=html)
        else:
            spider = SpiderI(url=url)
        with self.lock:
            self.spiders.append(spider)

    # Create the spider instances on a thread pool
    def aaa(self):
        """Reset the json output directory, then spawn one spider per page
        URL on a 5-worker pool; page 1 reuses the response already fetched
        in PagePaging.__init__ instead of downloading it again."""
        json_path = os.path.join(os.path.dirname(os.path.abspath('.')), 'datajson')  # json output directory
        # ignore_errors: the directory may not exist yet on the first run
        shutil.rmtree(json_path, ignore_errors=True)
        os.makedirs(json_path, exist_ok=True)

        futures = []                                   # pending worker tasks
        with ThreadPoolExecutor(max_workers=5) as executor:
            for i, url in enumerate(self.start_urls):
                if i == 0:
                    # first page was already downloaded by the parent class
                    futures.append(executor.submit(self.createspider, html=self.search_HTML))
                else:
                    futures.append(executor.submit(self.createspider, url=url))

            # wait for completion and propagate any worker exception
            for future in futures:
                future.result()






if __name__ == '__main__':
    # PagePaging debugging
    # page_paging = RequestResponse('black')
    # print('page count:', page_paging.page_sum)
    # print('total hits:', page_paging.data_sum)
    # for url in page_paging.start_urls:
    #     print(url)

    # RequestResponse debugging: run a crawl and dump every scraped record
    crawler = RequestResponse('a')
    print(len(crawler.spiders))
    for spider in crawler.spiders:
        for record in spider.data_list:
            print(record)

# Crawl pipeline overview
"""
RequestResponse
  |
SpiderI
  |
DataFile
  |
data saved to the output folder
"""


