# -*- coding: utf-8 -*-
# @Time    : 2023/2/6 20:35
# @Author  : kali
import random
import re
import time

import requests
from lxml import etree
from bs4 import BeautifulSoup

from database_init import db
from models import Tags, Question
from service.tags_service import add_or_update_tags_history, get_tags_data

host = 'https://stackoverflow.com'


def parser_tags_page_data(page=1, first_page=False):
    """
    Parse one page of the Stack Overflow tags listing and upsert each tag.

    :param page: page number to fetch (1-based)
    :param first_page: when True, also parse and return the total page count
    :return: total number of pages when ``first_page`` is True, otherwise None
             (also None when any error occurs)
    """

    url = 'https://stackoverflow.com/tags'

    if page != 1:
        url = f'https://stackoverflow.com/tags?page={page}&tab=popular'

    headers = {
        "Referer": "https://stackoverflow.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36"
    }
    try:
        # Throttle requests so the site does not rate-limit/ban us.
        time.sleep(random.randint(1, 6))
        print(f"parser_tags_page_data url:{url}")
        # BUG FIX: headers were built but never sent with the request.
        res = requests.get(url, headers=headers, timeout=(16, 32))
        html = etree.HTML(res.text)
        divs = html.xpath('//*[@id="tags-browser"]/div')

        for item in divs:
            # Tag name
            tag = item.xpath("./div[1]/div/a/text()")[0]
            # Absolute URL of the tag's question listing
            tag_url = host + item.xpath("./div[1]/div/a/@href")[0]
            # Short description of the tag
            introduction = item.xpath("./div[2]/text()")[0].strip()
            # Total question count (leading number of e.g. "2473653 questions")
            questions = item.xpath("./div[3]/div[1]/text()")[0].split(' ')[0].strip()
            # The stats div holds two anchors: "N asked today" and "M this week".
            # BUG FIX: the original read the first anchor twice, so week_asked
            # always duplicated today_asked; the weekly figure is the 2nd anchor.
            asked_stats = item.xpath("./div[3]/div[2]/a/text()")
            today_asked = asked_stats[0].split(' ')[0].strip()
            week_asked = asked_stats[1].split(' ')[0].strip() if len(asked_stats) > 1 else today_asked

            add_or_update_tags_history(tag, questions, today_asked, week_asked)
            tags = Tags.query.filter(Tags.tag == tag).first()
            if tags:
                # Tag already known: refresh its mutable fields.
                tags.tag_url = tag_url
                tags.introduction = introduction
                tags.questions = questions
                tags.today_asked = today_asked
                tags.week_asked = week_asked
                db.session.commit()
                continue
            # First time we see this tag: insert a new row.
            tags = Tags()
            tags.tag = tag
            tags.tag_url = tag_url
            tags.introduction = introduction
            tags.questions = questions
            tags.today_asked = today_asked
            tags.week_asked = week_asked
            tags.save()

        if not first_page:
            print(f"parser_tags_page_data page:{page},success.")
            return

        # 5th pagination link is the last numbered page (before "Next").
        page_num_str = html.xpath('//*[@id="tags_list"]/div[2]/a[5]/text()')[0].strip()
        print(f"parser_tags_page_data page:{page},success.")
        return int(page_num_str)
    except Exception as e:
        # Best-effort crawler: log the failure and keep the process alive.
        print(f"parser_tags_page_data error,page:{page},msg:{str(e)}")
        import traceback
        traceback.print_exc()


def tags_spider():
    """
    Crawl the Stack Overflow tags listing.

    Parses the first page to learn the total page count, then walks the
    remaining pages (capped for quick test runs).

    :return: None
    """

    # Parse page 1 and obtain the total number of pages.
    page_num = parser_tags_page_data(first_page=True)
    print(page_num)

    # BUG FIX: parser_tags_page_data returns None when the request/parse
    # fails; the original then crashed on range(2, None + 1).
    if not page_num:
        print("tags_spider error: could not determine total page count.")
        return

    # Continue from page 2 onwards.
    for page in range(2, page_num + 1):
        parser_tags_page_data(page)
        # For quick testing, only crawl a handful of pages.
        if page > 10:
            break


def parser_tags_detail(url):
    """
    Fetch a single question page and create or refresh its Question row.

    :param url: absolute URL of a Stack Overflow question
    :return: None
    """
    try:
        # Throttle to stay under the site's rate limits.
        time.sleep(random.randint(1, 3))
        response = requests.get(url, timeout=(16, 32))
        soup = BeautifulSoup(response.text, 'lxml')

        # Numeric question id is the second-to-last path segment of the URL.
        web_id = int(url.split('/')[-2])

        # --- scrape every field off the page ---
        title = soup.find(id='question-header').h1.a.string
        # Creation timestamp of the question.
        asked = soup.find(name='time', attrs={"itemprop": 'dateCreated'})['datetime']
        # "Modified" timestamp — refreshed whenever the question gets activity.
        modified = soup.find(name='a', class_='s-link s-link__inherit')['title']
        # View counter, e.g. "Viewed 1,234 times" -> 1234.
        viewed = int(soup.find(name='div', class_='flex--item ws-nowrap mb8')['title'].replace(',', '').split(' ')[1])
        # Full question body as pretty-printed HTML.
        question = soup.find(name='div', class_='s-prose js-post-body').prettify()

        # Comma-joined list of the question's tags.
        tag_items = soup.find(name='ul', class_='ml0 list-ls-none js-post-tag-list-wrapper d-inline').find_all(name='li')
        tags = ','.join(li.a.string for li in tag_items)

        votes = int(soup.find(name='div', attrs={"itemprop": 'upvoteCount'})['data-value'])
        answers = int(soup.find(name='h2', class_='mb0')['data-answercount'])

        # Asker's display name and profile URL.
        author_link = soup.find(name='div', class_='user-details', attrs={"itemprop": "author"}).a
        asker_name = author_link.string
        asker_url = host + author_link['href']

        # --- persist: update the existing row, or insert a fresh one ---
        existing = Question.query.filter(Question.web_id == web_id).first()
        if existing:
            existing.title = title
            existing.asked = asked
            existing.modified = modified
            existing.question = question
            existing.tags = tags
            existing.viewed = viewed
            existing.votes = votes
            existing.answers = answers
            db.session.commit()
            return

        record = Question()
        record.web_id = web_id
        record.url = url
        record.title = title
        record.asked = asked
        record.modified = modified
        record.question = question
        record.tags = tags
        record.viewed = viewed
        record.votes = votes
        record.answers = answers
        record.asker_name = asker_name
        record.asker_url = asker_url
        record.save()
        print(f'parser_tags_detail success,url:{url}')
    except Exception as e:
        # Best-effort crawler: log and continue with the next question.
        print(f"parser_tags_detail error,url:{url},msg:{str(e)}")
        import traceback
        traceback.print_exc()


def parser_tags_detail_page_data(url, first_page=False):
    """
    Parse one question-list page of a tag and crawl every question on it.

    :param url: URL of the tag's question listing page
    :param first_page: when True, also parse and return the total page count
    :return: total page count when ``first_page`` is True, otherwise None
    """

    # Throttle requests to avoid being blocked.
    time.sleep(random.randint(1, 3))
    response = requests.get(url, timeout=(16, 32))
    soup = BeautifulSoup(response.text, 'lxml')

    # Each question summary exposes its link inside an <h3> heading.
    for heading in soup.find_all(name='h3', class_="s-post-summary--content-title"):
        parser_tags_detail(host + heading.a['href'])

    if first_page:
        # Second-to-last pagination item is the highest numbered page
        # (the last one is the "Next" button).
        pagination = soup.find_all(name='a', class_='s-pagination--item js-pagination-item')
        return int(pagination[-2].get_text().strip())


def tags_detail_spider():
    """
    Crawl question details for the most popular tags.

    Looks up the top tags, then walks each tag's question listing pages
    (sorted by votes) and scrapes every question found.

    :return: None
    """

    match_data = {
        "page_size": 10
    }
    data, _ = get_tags_data(match_data)
    # NOTE(review): debug leftover — this overrides the DB result so only the
    # python tag is crawled. Remove this line to crawl every tag from the DB.
    data = [{"tag_url": "https://stackoverflow.com/questions/tagged/python"}]
    for item in data:
        base_url = item.get("tag_url") + "?tab=Votes"
        page_num = parser_tags_detail_page_data(base_url, first_page=True)

        # Guard: the first-page parse may fail upstream and yield None.
        if not page_num:
            continue

        for page in range(2, page_num + 1):
            # BUG FIX: the original appended "&page=N&pagesize=15" to the same
            # variable each iteration, so page parameters accumulated
            # (…&page=2&pagesize=15&page=3&pagesize=15…). Build each page URL
            # from the base instead.
            page_url = base_url + f'&page={page}&pagesize=15'
            parser_tags_detail_page_data(page_url)
            # For quick testing, only crawl a handful of pages.
            if page > 10:
                break


if __name__ == '__main__':
    # Manual entry point: uncomment one of the crawlers below to run it.
    # tags_spider()
    # tags_detail_spider()
    ...
