import asyncio
import hashlib
import time
from hashlib import md5

import aiohttp
import pymysql
from lxml import etree

# Desktop-Chrome User-Agent sent with every request so the target site
# serves normal pages instead of rejecting the default aiohttp client.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61"
                  " Safari/537.36"
}


def md5(script_str):
    """Return the hex MD5 digest of *script_str* (used as a dedup key)."""
    return hashlib.md5(script_str.encode("utf8")).hexdigest()


async def get_detail_page(req_url):
    """
    Fetch and parse one article detail page.

    :param req_url: detail-page URL
    :return: dict with keys 'title', 'content' and 'unique_key' (md5 of the
             URL), or None when the request fails or the page does not match
             the expected layout.
    """
    async with aiohttp.ClientSession() as session:
        # session.get() is itself an async context manager; no extra await.
        async with session.get(req_url, headers=headers) as response:
            if response.status != 200:
                return None
            html = await response.text()
            selector = etree.HTML(html)
            titles = selector.xpath("//h1[@class='art-title']/text()")
            bodies = selector.xpath("//div[contains(@class, 'text')]")
            # Guard against pages that lack the expected elements; an
            # unguarded [0] would raise IndexError and poison the whole
            # concurrent batch in main().
            if not titles or not bodies:
                return None
            # xpath("string(.)") collects all descendant text where text()
            # alone would miss nested markup; strip() trims the whitespace.
            content = bodies[0].xpath("string(.)").strip()
            return {'title': titles[0], 'content': content, 'unique_key': md5(req_url)}


class Reader(object):
    """
    Async iterator over the paginated article listing: each iteration step
    fetches one list page and exposes its detail-page URLs.
    """

    def __init__(self):
        # URL of the next list page to fetch; None once the listing ends.
        self.next_page = 'http://www.chamang.cn/article/'
        # Detail-page URLs parsed from the most recently fetched list page.
        self.detail_page_list = []

    async def get_list_page(self):
        """
        Fetch ``self.next_page``, record the detail-page URLs it contains
        and advance ``self.next_page`` to the following list page.

        :return: self on success, None when the page cannot be fetched.
        """
        async with aiohttp.ClientSession() as session:
            # session.get() is itself an async context manager; no extra await.
            async with session.get(self.next_page, headers=headers) as response:
                print("请求url:%s,响应码是：%d" % (self.next_page, response.status))
                # Require a 200: the original only special-cased 404, so a
                # 500/403 error page would have been fed to the parser.
                if response.status != 200:
                    return None
                html = await response.text()
                selector = etree.HTML(html)
                # Detail-page links on the current list page.
                self.detail_page_list = selector.xpath(
                    "//div[@class='list']/ul/li[@class='img']/a/@href")
                # Link to the next list page; empty result means last page.
                next_page_url = selector.xpath(
                    "//div[@id='pages']/a[@class='nextpage']/@href")
                self.next_page = next_page_url[0] if next_page_url else None
                return self

    def __aiter__(self):
        return self

    async def __anext__(self):
        # The previous page reported no successor: the listing is exhausted.
        if self.next_page is None:
            raise StopAsyncIteration

        val = await self.get_list_page()
        if val is None:
            raise StopAsyncIteration
        return val


async def mysql_insert_one_by_one(data):
    """
    Insert articles one row at a time so that a duplicate unique_key only
    fails that single row instead of aborting the whole batch.

    NOTE(review): pymysql is a synchronous driver, so this blocks the event
    loop while inserting; consider ``loop.run_in_executor`` or an async
    MySQL driver if throughput matters.

    :param data: iterable of dicts with 'title', 'content', 'unique_key'
    :return: (success_count, failure_count)
    """
    # PyMySQL >= 1.0 requires keyword arguments; the old positional form
    # connect("localhost", "root", "root", "fastadmin") raises TypeError.
    db = pymysql.connect(host="localhost", user="root",
                         password="root", database="fastadmin")
    suc_total = 0   # rows inserted
    fail_total = 0  # rows rejected (e.g. duplicate unique_key)
    sql = 'insert into fa_article(title,content,unique_key,createtime) values(%s,%s,%s,%s)'
    try:
        with db.cursor() as cursor:
            for value in data:
                row = (value['title'], value['content'],
                       value['unique_key'], int(time.time()))
                try:
                    suc_total += cursor.execute(sql, row)
                    db.commit()
                except Exception:
                    # Roll back just this row and keep going.
                    fail_total += 1
                    db.rollback()
    finally:
        # Close the connection even if an unexpected error escapes the loop.
        db.close()
    return suc_total, fail_total


async def main():
    """Crawl every list page, fetch its detail pages concurrently, store them."""
    reader = Reader()
    # Iterating the Reader drives the pagination, one list page per step.
    async for page in reader:
        start = time.time()
        # asyncio.gather schedules the coroutines directly and preserves
        # order; asyncio.wait() no longer accepts bare coroutine objects
        # (deprecated 3.8, removed 3.11).
        results = await asyncio.gather(
            *(get_detail_page(url) for url in page.detail_page_list))
        # Drop pages that failed to download or parse (returned None) so the
        # insert loop never sees a non-dict value.
        res_list = [res for res in results if res is not None]
        suc, fail = await mysql_insert_one_by_one(res_list)
        print('入库条数' + str(suc))
        print('失败条数' + str(fail))

        end = time.time()
        print("总耗时：", end - start)


# Guard the entry point so importing this module does not start the crawl.
if __name__ == "__main__":
    asyncio.run(main())
