# -*- coding:utf-8 -*-

import os
import re
import time
import requests
from flask import current_app
from logging.handlers import RotatingFileHandler
from bs4 import BeautifulSoup


# 详情页抓取
# Detail-page scraper
class Page(object):
    """Fetches a question detail page and extracts its answers."""

    # Fetch the raw HTML of a page by URL.
    def getPageByURL(self, url):
        """Return the HTML text of *url*.

        A timeout is set so a stalled connection cannot hang the
        crawler forever (requests blocks indefinitely by default).
        """
        res = requests.get(url, timeout=10)
        return res.text

    # Extract the "best answer" block from a detail page.
    def getGoodAnswer(self, page):
        """Parse *page* HTML and return (answer_text, author, rating).

        Returns None when either the best-answer block or the author
        info bar cannot be found in the markup.
        """
        soup = BeautifulSoup(page, 'html.parser')
        text = soup.select('div.new-goods-answer ul.new-answer-list li')
        # The answer body sits inside a <pre> within the "good_item" <li>.
        pattern = re.compile(u'<li.*?class="good_item".*?<pre>(.*?)</pre>', re.S)
        match = re.search(pattern, str(text))
        if match:
            ans_text = match.group(1)
            # Author name and approval rating live in a separate user bar.
            soup_info = soup.select('div.new-goods-answer ul li div.new-user-bar')
            pattern_info = re.compile(u'<div.*?name-txt.*?<a.*?>(.*?)</a><a.*?<p.*?>.*?<em.*?>(.*?)</', re.S)
            match_info = re.search(pattern_info, str(soup_info))
            if match_info:
                print('>>> 最佳答案\n 解决方法：{}，作者：{}，好评率：{}\n'.format(ans_text, match_info.group(1), match_info.group(2)))
                return ans_text, match_info.group(1), match_info.group(2)
            # Answer text found but author info missing: treat as not found.
            return None
        print('找不到最佳答案')
        return None

    # Extract the non-best answers (not implemented yet).
    def getOtherAnswers(self, page):
        # TODO: parse the remaining answers from the page.
        pass

    def getAnswer(self, url):
        """Fetch *url* and return (best_answer, other_answers)."""
        page = self.getPageByURL(url)
        good_ans = self.getGoodAnswer(page)
        other_ans = self.getOtherAnswers(page)
        return good_ans, other_ans


# 蜘蛛
# Crawler driver
class Spider(object):
    """Walks the iask.sina.com.cn listing pages and scrapes every
    question on each page through a ``Page`` detail-page scraper."""

    def __init__(self):
        # Relative path of the next listing page, filled in by run().
        self.next_page = None
        # Detail-page scraper used for each individual question.
        self.page_spider = Page()

    # Current local time as a bracketed, log-style timestamp.
    def getCurrentTime(self):
        """Return the local time formatted as ``[YYYY-MM-DD HH:MM:SS]``."""
        return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))

    # Attach a rotating file handler to the Flask app logger.
    def registerLog(self):
        # RotatingFileHandler does not create intermediate directories
        # and would raise FileNotFoundError if logs/ is missing.
        os.makedirs('logs', exist_ok=True)
        file_handler = RotatingFileHandler('logs/out.log', maxBytes=10 * 1024 * 1024, backupCount=10)
        current_app.logger.addHandler(file_handler)

    # Build the absolute listing-page URL from its relative path.
    def getPageUrlByNum(self, page_url):
        return 'https://iask.sina.com.cn/c/{}'.format(page_url)

    # Fetch the HTML of a listing page given its relative path.
    def getPageByNum(self, page_url):
        # Timeout so a dead connection cannot stall the crawl loop.
        res = requests.get(self.getPageUrlByNum(page_url), timeout=10)
        return res.text

    # Extract one question's details from a listing <li> fragment.
    def getQuestionsInfo(self, question):
        """Return ``(author, href, title, answer_count, answer_time)``
        parsed from *question*'s markup, or None when it does not match.
        """
        pattern = re.compile(u'<a.*?<img alt="(.*?)".*?<a.*?href="(.*?)".*?>(.*?)</a>.*?<span>(.*?)回答</span>.*?>(.*?)</span>', re.S)
        match = re.search(pattern, str(question))
        if match:
            # Question author
            author = match.group(1)
            # Relative link to the question detail page
            href = match.group(2)
            # Question title
            title = match.group(3)
            # Number of answers
            ans_num = match.group(4)
            # Time of the latest answer
            ans_time = match.group(5)

            return author, href, title, ans_num, ans_time
        else:
            return None

    # Scrape every question on one listing page.
    def getQuestions(self, page_url):
        page = self.getPageByNum(page_url)
        soup = BeautifulSoup(page, 'html.parser')
        questions = soup.select('ul.list-group li')
        # Walk each question entry on the page.
        for question in questions:
            info = self.getQuestionsInfo(question)
            if info:
                # Build the absolute question URL from the relative href.
                url = 'https://iask.sina.com.cn/{}'.format(info[1])
                print('问题：{}，提问者：{}'.format(info[2], info[0]))
                self.page_spider.getAnswer(url)

    # Find the relative path of the next listing page.
    def getNextPageNum(self, page):
        """Return the next page's relative path, or None on the last page."""
        current_app.logger.info('{} 正在获取下一个页面，请稍后'.format(self.getCurrentTime()))
        page = self.getPageByNum(page)
        pattern = re.compile(u'<a href="/c/(.*?)" class="btn-page">下一页</a>')
        match = re.search(pattern, page)
        if match:
            current_app.logger.info('{} 获取到下一个有效页面{}'.format(self.getCurrentTime(), match.group(1)))
            return match.group(1)
        else:
            current_app.logger.info('{} 查不到下一页，已经到最后一页了'.format(self.getCurrentTime()))

    def run(self):
        """Main crawl loop: seed the bookmark file, then follow the
        "next page" links forever, scraping each listing page."""
        self.registerLog()
        # Seed the page bookmark file on first run.  Write it directly
        # instead of shelling out ("echo 1 > page.txt"), which is
        # platform-dependent (Windows echo appends trailing whitespace).
        if not os.path.exists('page.txt'):
            with open('page.txt', 'w') as f:
                f.write('1')

        with open('page.txt', 'r') as f:
            page = f.readline()

        current_app.logger.info('开始页码{}'.format(int(page.strip()) - 1))
        current_app.logger.info('爬虫正在启动，开始爬取爱问知识人的问题')
        current_app.logger.info('{} 正在获取目录页面个数，请稍后'.format(self.getCurrentTime()))

        self.next_page = self.getNextPageNum('1069-goodAnswer-1-new.html')
        while True:
            self.next_page = self.getNextPageNum(self.next_page)
            if self.next_page is None:
                # NOTE(review): once the last page is reached, the next
                # iteration calls getNextPageNum(None), which fetches
                # ".../c/None" and keeps returning None — the loop then
                # idles in 10-second sleeps forever. Confirm whether the
                # crawl should restart from page 1 instead.
                print('最后一页，停10秒')
                time.sleep(10)
            else:
                print(self.next_page)
                self.getQuestions(self.next_page)
                print('准备下一下，停10秒')
                time.sleep(10)



