from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient

from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

import numpy as np
import platform
import requests
import datetime
import pymongo
import hashlib
import random
import base64
import codecs
import pprint
import json
import math
import time
import ssl
import sys
import os
import re

# Allow very deep recursion: the crawler originally pages through article
# lists recursively, and a long history would blow the default limit.
sys.setrecursionlimit(1000000)

# The chromedriver binary is expected to sit in the PARENT directory of the
# current working directory (everything up to and including the last '/').
_parent_dir = os.getcwd()[:os.getcwd().rfind('/') + 1]
chrome_driver_path = _parent_dir + ('chromedriver_linux' if platform.system() == 'Linux' else 'chromedriver')

# MongoDB host: the single optional CLI argument, defaulting to localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'


def chrome_driver():
    """Build a Chrome WebDriver configured for crawling.

    Returns a selenium Chrome driver with performance logging enabled, which
    ``POST.parse_params`` relies on to sniff network requests via
    ``driver.get_log('performance')``.
    """
    options = webdriver.ChromeOptions()
    # options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-images')
    options.add_argument('--no-sandbox')
    # Avoids crashes from /dev/shm exhaustion when running in containers.
    options.add_argument('--disable-dev-shm-usage')

    if platform.system() == 'Linux':
        # Snap-packaged Chromium on the Linux crawl boxes.
        options.binary_location = '/snap/bin/chromium'

    desired_capabilities = options.to_capabilities()
    # ChromeDriver >= 75 only honors the vendor-prefixed 'goog:loggingPrefs';
    # keep the legacy un-prefixed key as well for older drivers.  Without the
    # prefixed key, get_log('performance') returns nothing on modern Chrome.
    desired_capabilities['loggingPrefs'] = {'performance': 'ALL'}
    desired_capabilities['goog:loggingPrefs'] = {'performance': 'ALL'}

    return webdriver.Chrome(chrome_driver_path, desired_capabilities = desired_capabilities)


class POST():
    """Crawl Toutiao article posts for a batch of users into MongoDB.

    Constructing an instance immediately opens a MongoDB connection and a
    Chrome WebDriver, crawls every user's article history, and marks each
    user document as ``crawled`` when done.
    """

    # Bounded retry count for flaky page loads (the original retried forever
    # via unbounded recursion, risking stack exhaustion).
    MAX_RETRIES = 5

    def __init__(self, users):
        """Crawl all of *users* (dicts with at least an 'id' key)."""
        self.log = {}

        # connect=False defers the actual connection, making the client safe
        # to construct before a multiprocessing fork.
        self.client = MongoClient(net, 27017, connect = False)

        self.toutiao_zhuanlan_db = self.client['toutiao_zhuanlan_db']
        self.toutiao_zhuanlan_coll = self.toutiao_zhuanlan_db['toutiao_zhuanlan_coll']

        self.toutiao_user_db = self.client['toutiao_user_db']
        self.toutiao_user_coll = self.toutiao_user_db['toutiao_user_coll']

        self.driver = chrome_driver()

        try:
            # Best-effort jQuery ajax logging hook; on the initial blank page
            # '$' is undefined and the script throws, which is not fatal.
            self.driver.execute_script("""
                $.ajaxSetup({
                    contentType: "",
                    complete: function (XMLHttpRequest, textStatus) {
                        console.log(XMLHttpRequest, textStatus);
                    }
                });
            """)
        except Exception:
            pass

        self.users = users

        for user in self.users:
            params = self.parse_params(user['id'])

            # parse_params returns None when the article-list request was not
            # observed; passing None on would crash parse.urlencode.
            if params:
                self.crawl_post_list(params)
            self.toutiao_user_coll.update_one({'id': user['id']}, {'$set': {'crawled': True}})

    def _get_with_retry(self, url):
        """Load *url* in the driver; True on success, False after MAX_RETRIES failures."""
        for _ in range(self.MAX_RETRIES):
            try:
                self.driver.get(url)
                return True
            except Exception:
                time.sleep(1)
        return False

    def crawl_post_list(self, params):
        """Page through a user's article-list feed, crawling every text article.

        Iterates instead of recursing so deep pagination cannot exhaust the
        stack; *params* is mutated in place to advance ``max_behot_time``.
        """
        while True:
            url = 'https://www.toutiao.com/c/user/article/?' + parse.urlencode(params)

            if not self._get_with_retry(url):
                return

            soup = BeautifulSoup(self.driver.page_source, 'lxml')

            try:
                resp = json.loads(soup.body.get_text())
            except (ValueError, AttributeError):
                # Page body missing or not JSON (e.g. an anti-bot page).
                return

            if resp.get('message') not in (None, 'success'):
                return

            # Guard against a missing 'data' key as well as an empty list
            # (the original crashed with KeyError when 'data' was absent).
            articles = resp.get('data')
            if not articles:
                return

            for article in articles:
                if article.get('article_genre') == 'video':
                    continue

                source_url = article.get('source_url')
                if source_url:
                    self.crawl_post('https://www.toutiao.com/' + source_url, article.get('title'))

            # Follow pagination only when the API indicates more data and
            # actually supplies the next cursor.
            if not resp.get('has_more'):
                return
            try:
                params['max_behot_time'] = resp['next']['max_behot_time']
            except (KeyError, TypeError):
                return

    def crawl_post(self, url, title):
        """Fetch a single article page and persist its text, deduplicated by URL."""
        if not url or not title:
            return

        if self.toutiao_zhuanlan_coll.find_one({'url': url}):
            return

        if not self._get_with_retry(url):
            return

        time.sleep(1)  # let the article body render before scraping

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        content = soup.find(class_ = 'article-content')
        if content is None:
            return

        item = {
            'title': title,
            'text': content.get_text().strip().lower(),
            'url': url
        }

        # Re-check before insert: another worker may have stored the same URL
        # while we were fetching the page.
        if not self.toutiao_zhuanlan_coll.find_one({'url': url}):
            self.toutiao_zhuanlan_coll.insert_one(item)

            pprint.pprint(item)
            print('-' * 110)

    def parse_params(self, user_id):
        """Open the user's profile page and sniff the article-list API request.

        Scans Chrome performance logs (newest first) for the request to the
        article endpoint and returns its query string as a flat dict of
        single values, or None when no such request was observed.
        """
        url = 'https://www.toutiao.com/c/user/' + str(user_id) + '/'
        self.driver.get(url)
        time.sleep(3)  # give the page time to fire its XHRs

        for entry in self.driver.get_log('performance')[::-1]:
            message = json.loads(entry['message'])['message'].get('params', {})

            request = message.get('request')
            if not request or 'url' not in request:
                continue

            request_url = request['url']

            if 'https://www.toutiao.com/c/user/article/' in request_url:
                query = parse.parse_qs(parse.urlparse(request_url).query)
                return {k: v[0] for k, v in query.items() if len(v)}

        return None


def start(users):
    """Multiprocessing entry point: crawl *users* inside this worker process."""
    POST(users)


if __name__ == '__main__':
    # Number of crawler processes; the user list is split evenly among them.
    NUM_PROCESSES = 1

    client = MongoClient(net, 27017)

    toutiao_user_db = client['toutiao_user_db']
    toutiao_user_coll = toutiao_user_db['toutiao_user_coll']

    # Only users not yet crawled; filter server-side instead of pulling every
    # document and skipping in Python.
    users = list(toutiao_user_coll.find({'crawled': {'$exists': False}}))

    # Chunk size per worker (0 when there are no users, yielding empty chunks).
    step = math.ceil(len(users) / NUM_PROCESSES)

    pool = []

    for i in range(NUM_PROCESSES):
        p = Process(target = start, args = (users[i * step: (i + 1) * step],))
        pool.append(p)

    for p in pool:
        p.start()

    for p in pool:
        p.join()
