import aiohttp
import asyncio
import requests
import xlrd
import xlwt
import time
import os
import re
import shutil


def open_excel_to_list(file):
    '''
    Read the author name and the list of URLs to process from an .xls workbook.

    Row 0, column 1 holds the author; every following row's column 0 holds a URL.

    :param file: path to the input workbook
    :return: author:str, urlList：[str]
    '''
    sheet = xlrd.open_workbook(file).sheet_by_index(0)
    author = str(sheet.cell(0, 1).value).strip()  # author cell, whitespace removed
    urls = []
    for row in range(1, sheet.nrows):  # remaining rows are the task URLs
        urls.append(str(sheet.cell(row, 0).value).strip())
    return author, urls


def save_list_to_excel(answer, author, to):
    '''
    Save the crawl result: the author in row 0, one matched URL per row after it.

    :param answer: matched task URLs
    :param author: author name written next to the "作者:" label
    :param to: destination .xls path
    :return: None
    '''
    book = xlwt.Workbook()
    sheet = book.add_sheet("Sheet1")
    sheet.write(0, 0, "作者:")
    sheet.write(0, 1, author)
    # enumerate from 1 so URLs start on the row below the author header
    for row, url in enumerate(answer, start=1):
        sheet.write(row, 0, url)
    book.save(to)


class Spider(object):
    """Crawl a list of URLs and collect those whose page body contains the author.

    The input workbook (parsed by ``open_excel_to_list``) supplies the author
    string and the URLs. Results are written to an output workbook, then both
    files are moved into a directory named after the digits in the input path.
    """

    def __init__(self, path: str, to=None):
        """
        :param path: input .xls workbook with the author and task URLs
        :param to: output path; defaults to ``path + ".out.xls"``
        """
        self.path = path

        # Author string to look for, and the URLs to fetch.
        self.author, self.url_list = open_excel_to_list(path)

        # Batch size: how many URLs are fetched concurrently per round.
        self.next = 100

        # NOTE: the original wrapped every literal in .strip(); the values
        # contain no surrounding whitespace, so the calls were no-ops.
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36",
            "sec-fetch-user": "?1",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image"
                      "/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "upgrade-insecure-requests": "1",
        }

        # Bootstrap cookies with one synchronous request before crawling.
        self.cookie = self.get_cookie()

        self.answer = []  # URLs whose page mentions the author

        # Default the output path to sit next to the input workbook.
        self.to = path + ".out.xls" if to is None else to

    def get_cookie(self):
        '''
        Fetch the first URL once with requests to obtain a session cookie.

        :return: dict of cookie name -> value
        '''
        url = self.url_list[0]
        req = requests.get(url, headers=self.headers)
        return requests.utils.dict_from_cookiejar(req.cookies)  # cookiejar -> dict

    async def crawl(self):
        """Fetch all URLs in batches of ``self.next``, pausing 3s between batches."""
        size = len(self.url_list)
        async with aiohttp.ClientSession(headers=self.headers, cookies=self.cookie) as session:
            for start in range(0, size, self.next):
                end = min(start + self.next, size)  # clamp the last batch

                await asyncio.gather(
                    *(self.get_url(url, session) for url in self.url_list[start:end])
                )
                if end == size:
                    break
                # BUGFIX: the original used time.sleep(3), which blocks the
                # whole event loop; asyncio.sleep yields control instead.
                await asyncio.sleep(3)

    async def get_url(self, url, session, retries=3):
        """Fetch one URL; record it in ``self.answer`` if the body contains the author.

        :param url: URL to fetch
        :param session: shared aiohttp session
        :param retries: remaining retry attempts after a ClientOSError
        """
        try:
            async with session.get(url) as resp:
                content = await resp.text()
                if resp.status == 200:
                    # Successful fetch: keep the URL when the author appears.
                    if self.author in content:
                        self.answer.append(url)
                elif resp.status == 302:
                    # Redirect (only seen if auto-redirects are off): log target.
                    print(resp.headers.get("location"))
                elif resp.status == 404:  # dead link, ignore
                    return
                else:  # unexpected status
                    print("请求失败状态码: {}, 内容：{}\n url:{}".format(resp.status, content, url))
                    return
        except aiohttp.client.ClientOSError:
            # BUGFIX: the original fired asyncio.create_task() without awaiting
            # it, so the retry could be abandoned when the session closed.
            # Await it directly, with a bounded retry count to avoid unbounded
            # recursion on a persistently failing URL.
            if retries > 0:
                print("发生一个ClientOSError", url, "重新读取")
                await self.get_url(url, session, retries - 1)

    def run(self):
        """Run the crawl, save the results, and archive both files."""
        asyncio.run(self.crawl())

        save_list_to_excel(self.answer, self.author, self.to)  # persist results

        # Directory named after the first digit run in the input path.
        # BUGFIX: the original set dir_name = hash(self.path) (an int) when
        # no digits matched, then unconditionally called .group() on it,
        # raising AttributeError; handle the fallback explicitly.
        match = re.search(r"(\d+)", self.path)
        dir_name = match.group(1) if match else str(hash(self.path))

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(dir_name, exist_ok=True)

        work_dir = os.getcwd()  # absolute base for the moves
        shutil.move(os.path.join(work_dir, self.path),
                    os.path.join(work_dir, dir_name, self.path))
        shutil.move(os.path.join(work_dir, self.to),
                    os.path.join(work_dir, dir_name, self.to))

    def out_put(self):
        """Return the list of matched URLs collected so far."""
        return self.answer


def main():
    """Process every .xls workbook found in the current working directory."""
    for name in os.listdir(os.getcwd()):
        if not name.endswith(".xls"):
            continue  # skip anything that is not an input workbook
        spider = Spider(name)
        spider.run()
        print(name, "OK")


# Run the batch processor only when executed as a script, not on import.
if __name__ == '__main__':
    main()
