# 少年中国评论 (Young China Review) — scrapers for article pages and the archive directory
import random
import re
import time

import requests
from bs4 import BeautifulSoup, NavigableString
from fake_useragent import UserAgent
from mongoDB import MangoDBConnection


class ArticleSpider(object):
    """Scrape individual article pages and return structured records.

    Each record is a dict with the article title, plain-text and HTML content,
    a caller-supplied source label, the reprint time (epoch milliseconds),
    view/comment/like counts, and the article URL.
    """

    def __init__(self, source):
        self.blog = 1          # retry attempt counter (1-based) used by get_html
        self.headers = {}      # per-request headers, refreshed with a random UA in run()
        self.source = source   # label copied into every scraped record

    @staticmethod
    def format_str_to_num(str_num: str) -> int:
        """Convert a comma-grouped numeric string such as '1,234' to an int."""
        return int(str_num.replace(',', ''))

    def get_html(self, url, headers):
        """GET ``url``, retrying up to 3 times on network errors.

        Returns the ``requests.Response``, or ``None`` once all retries fail.
        """
        if self.blog <= 3:
            try:
                # timeout added so a hung connection cannot block the crawl forever
                return requests.get(url, headers=headers, timeout=10)
            except Exception:
                self.blog += 1
                # BUG FIX: the original discarded the recursive result, so even
                # a successful retry returned None to the caller.
                return self.get_html(url, headers)
        # BUG FIX: reset the counter so the next URL gets a fresh retry budget.
        self.blog = 1
        print('异常')
        return None

    def parse_html(self, url, headers):
        """Fetch an article page and extract its fields into a dict.

        Raises ValueError when the page could not be fetched; run() catches
        that and skips the URL.
        """
        res = self.get_html(url, headers)
        if res is None:
            # BUG FIX: the original crashed with AttributeError on None.content.
            raise ValueError('failed to fetch ' + url)

        soup = BeautifulSoup(res.content, 'html.parser')

        post = soup.find('div', class_='post')
        title = post.find('div', class_='post-title')
        info = title.find('div', class_='post_icon')
        content = post.find('div', class_='post-content')

        # Publish date in the 'postclock' span, '%Y-%m-%d' → epoch milliseconds.
        r_time = int(time.mktime(time.strptime(
            info.find('span', class_='postclock').text.strip(), '%Y-%m-%d'))) * 1000

        # Renamed from `json` to avoid shadowing the well-known module name.
        record = {
            'title': title.find('h1', class_='title').text.strip(),
            'content_text': content.text.strip(),
            'content_html': str(content),
            'source': self.source,
            'reprint_time': r_time,
            'eye': self.format_str_to_num(info.find('span', class_='posteye').text.strip()),
            'comment': self.format_str_to_num(info.find('span', class_='postcomment').text.strip()),
            'like': self.format_str_to_num(info.find('span', class_='postlike').text.strip()),
            'url': url,
        }

        return record

    def run(self, urls):
        """Scrape every URL in ``urls``; failed pages are logged and skipped.

        Returns the list of successfully scraped article dicts.
        """
        ua = UserAgent()
        records = []

        for url in urls:
            print(url, '-----------------------------------------------')

            self.headers = {'User-Agent': ua.random}
            try:
                records.append(self.parse_html(url, self.headers))
                # Polite random delay between requests.
                time.sleep(random.randint(2, 6))
            except Exception as e:
                # BUG FIX: narrowed from a bare `except:` which silently hid
                # every failure (including the None-response crash above).
                print('跳过', url, e)
                continue

        return records


class DirectorySpider:
    """Scrape the youngchina.review archive page and store its entries in MongoDB.

    Entries are grouped by year heading; each batch is inserted into the
    ``water.directory`` collection as ``{title, url, time, eye}`` documents.
    """

    # Archive listing URL (kept at class level, as in the original).
    href = 'https://youngchina.review/archives-page/6466'

    def __init__(self):
        # BUG FIX: retry counter and headers moved to instance state — the
        # original class-level dict/counter were shared across all instances.
        self.blog = 1
        self.headers = {}
        mongo_client = MangoDBConnection()
        db = mongo_client.myclient['water']
        self.collection = db['directory']

    def get_html(self, headers):
        """GET the archive page, retrying up to 3 times on network errors.

        Returns the ``requests.Response``, or ``None`` once all retries fail.
        """
        if self.blog <= 3:
            try:
                print(self.href)
                # timeout added so a hung connection cannot block forever
                return requests.get(self.href, headers=headers, timeout=10)
            except Exception:
                self.blog += 1
                # BUG FIX: the original discarded the recursive result, so even
                # a successful retry returned None to the caller.
                return self.get_html(headers)
        self.blog = 1
        print('获取数据异常')
        return None

    def parse_html(self, headers):
        """Parse the archive page and insert one batch of entries per year."""
        res = self.get_html(headers)
        if res is None:
            # BUG FIX: the original crashed with AttributeError on None.content.
            print('获取数据异常')
            return

        soup = BeautifulSoup(res.content, 'html.parser')

        entry_lists = soup.select('ul.mod-archive-list')
        year_tags = soup.select('div.mod-archive-year')

        # Each <ul> of entries is paired positionally with a year heading
        # (zip replaces the original index loop; `yaer` typo fixed).
        for year_tag, entry_list in zip(year_tags, entry_lists):
            year = year_tag.text
            documents = []

            for item in entry_list.find_all('li'):
                link = item.find('a')
                print(link)
                documents.append({
                    'title': link.text,
                    'url': link['href'],
                    # year heading + month-day from the <time> tag's attribute
                    'time': year + '-' + item.find('time')['datetime'],
                    'eye': item.find('span').text,
                })

            # BUG FIX: insert_many raises InvalidOperation on an empty list.
            if documents:
                self.collection.insert_many(documents)

    def run(self):
        """Entry point: pick a random User-Agent and scrape the archive."""
        ua = UserAgent()
        self.headers['user-agent'] = ua.random
        self.parse_html(self.headers)


