from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

from selenium import webdriver
import numpy as np
import requests
import datetime
import platform
import pymongo
import random
import hashlib
import math
import base64
import codecs
import json
import pprint
import time
import ssl
import sys
import os
import re

# Permit very deep recursion: the crawler's retry logic recurses on failure.
sys.setrecursionlimit(1000000)

# Directory one level above the current working directory, trailing '/' kept.
_parent_dir = os.getcwd()[:os.getcwd().rfind('/') + 1]

# Pick the chromedriver binary matching the host platform.
chrome_driver_path = _parent_dir + ('chromedriver_linux' if platform.system() == 'Linux' else 'chromedriver')

# MongoDB host: first CLI argument when given, otherwise localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'


def chrome_driver():
    """Create and return a Chrome WebDriver configured for crawling.

    GPU and image loading are disabled and a 20 s page-load timeout is set.
    The driver binary is resolved via the module-level ``chrome_driver_path``.
    """
    options = webdriver.ChromeOptions()
    # options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-images')

    # NOTE(review): desired_capabilities / executable_path are the Selenium 3
    # style API; when upgrading to Selenium 4 migrate to Service(...) and
    # pass `options` directly.
    desired_capabilities = options.to_capabilities()

    driver = webdriver.Chrome(desired_capabilities = desired_capabilities, executable_path = chrome_driver_path)
    driver.set_page_load_timeout(20)

    return driver


class ZHUANLAN():
    """Crawl Zhihu column (zhuanlan) posts for a list of users.

    Construction performs all the work as a side effect: it connects to
    MongoDB, starts a browser, logs in to Zhihu, then visits every user's
    post listing and stores each post's title/text in the
    ``zhihu_zhuanlan_coll`` collection.
    """

    def __init__(self, users):
        """Connect to MongoDB, start the browser, log in, and crawl.

        users -- iterable of user documents; each must carry a 'link' key
                 holding a protocol-relative profile URL ('//www.zhihu.com/...').
        """
        self.log = {}

        # connect = False defers the connection until first use, which keeps
        # the client fork-safe when this runs inside a worker Process.
        self.client = MongoClient(net, 27017, connect = False)

        self.zhihu_zhuanlan_db = self.client['zhihu_zhuanlan_db']
        self.zhihu_zhuanlan_coll = self.zhihu_zhuanlan_db['zhihu_zhuanlan_coll']

        self.zhihu_user_db = self.client['zhihu_user_db']
        self.zhihu_user_coll = self.zhihu_user_db['zhihu_user_coll']

        self.driver = chrome_driver()

        self.users = users

        self.login()

        for user in self.users:
            self.crawl_user(user['link'])

    def login(self):
        """Log in to Zhihu, solving the security-check captcha when shown.

        NOTE(review): the account credentials are hard-coded below — they
        should be moved to configuration / environment variables.
        """
        # Retry the front page in a loop rather than recursing on every
        # timeout (the old recursive retry could grow the stack unboundedly).
        while True:
            try:
                self.driver.get('https://www.zhihu.com/')
                break
            except Exception:
                continue

        if self.driver.title.find('安全验证 - 知乎') != -1:
            self.verification_code()

        self.driver.find_element_by_css_selector('.HomeSidebar-signBannerButton').click()

        time.sleep(1)

        self.driver.find_element_by_css_selector('input[name="username"]').send_keys('13735863577')
        self.driver.find_element_by_css_selector('input[name="password"]').send_keys('1991303017aaa')
        self.driver.find_element_by_css_selector('button[type="submit"]').click()

        time.sleep(2)

        try:
            # The security-check page may appear again right after submitting.
            if self.driver.title.find('安全验证 - 知乎') != -1:
                self.verification_code()
        except Exception:
            pass

        time.sleep(2)

    def verification_code(self):
        """Solve the captcha on the current security-check page and submit it."""
        time.sleep(2)

        soup = BeautifulSoup(self.driver.page_source, 'lxml')
        code = self.get_code(soup.find(class_ = 'Unhuman-captcha').get('src'))

        self.driver.find_element_by_css_selector('.Unhuman-input input').send_keys(code)
        self.driver.find_element_by_css_selector('button').click()

        time.sleep(2)

    def url2base64(self, picUrl):
        """Download picUrl and return its content base64-encoded (bytes)."""
        with request.urlopen(picUrl) as web:
            return base64.b64encode(web.read())

    def CalcSign(self, s):
        """Return the hex MD5 digest of s (UTF-8 encoded)."""
        hl = hashlib.md5()
        hl.update(s.encode(encoding = 'utf-8'))

        return hl.hexdigest()

    def get_code(self, picUrl):
        """Submit the captcha image at picUrl to the fateadm solving service.

        Returns the recognised text on success, or False when the service
        reports an error. NOTE(review): the API user id and key are
        hard-coded — move them to configuration.
        """
        tm = str(int(time.time()))
        img_data = self.url2base64(picUrl)
        rsp = requests.post('http://pred.fateadm.com/api/capreg', data = {
            'user_id': '103595',
            'timestamp': tm,
            'sign': self.CalcSign('103595' + tm + self.CalcSign(tm + 'qn4iUtbRoSU95do6zEglMUmOLIjkNIiQ')),
            'predict_type': '30400',
            'img_data': img_data
        }).json()

        if rsp['RetCode'] == '0':
            return json.loads(rsp['RspData'])['result']
        else:
            return False

    def crawl_post(self, url):
        """Fetch a single column post and store its title/text in MongoDB.

        Skips posts that are already stored, dead pages ("wasteland" 404),
        and pages that fail to load.
        """
        print(url)

        if self.zhihu_zhuanlan_coll.find_one({'url': url}):
            return

        try:
            self.driver.get(url)
        except Exception:
            return

        if self.driver.page_source.find('你似乎来到了没有知识存在的荒原') != -1:
            return

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        if not soup:
            return

        if self.driver.title.find('安全验证 - 知乎') != -1:
            self.verification_code()
            soup = BeautifulSoup(self.driver.page_source, 'lxml')

        try:
            # Strip inline image tags, runs of whitespace, and ----/==== rules.
            content = re.sub(r'<img.*?>|\s{2,}|\-{2,}|={2,}', '', soup.find(class_ = 'Post-RichText').get_text()).strip()
        except Exception:
            content = ''

        try:
            title = soup.find(class_ = 'Post-Title').get_text()
        except Exception:
            title = ''

        item = {
            'url': url,
            'text': content.lower(),
            'title': title,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        print(title)
        print()
        print(content)

        # Re-check before inserting: another worker may have stored this
        # post while we were fetching the page.
        if not self.zhihu_zhuanlan_coll.find_one({'url': url}) and content:
            self.zhihu_zhuanlan_coll.insert_one(item)

    def crawl_user(self, url, page = 1):
        """Walk a user's post listing page by page, crawling every post.

        url  -- protocol-relative profile link (e.g. '//www.zhihu.com/people/x')
        page -- listing page to start from (defaults to the first page)

        Marks the user document as crawled once the listing is exhausted;
        returns early (without marking) on anti-spam blocks, disabled
        accounts, or an unexpectedly empty listing.
        """
        while True:
            try:
                self.driver.get('https:' + url + '/posts?page=' + str(page))
            except Exception:
                # Load failure: retry the same page instead of recursing.
                continue

            if self.driver.page_source.find('该用户暂时被反作弊限制') != -1:
                return

            if self.driver.page_source.find('该帐号已停用，主页无法访') != -1:
                return

            soup = BeautifulSoup(self.driver.page_source, 'lxml')

            if self.driver.title.find('安全验证 - 知乎') != -1:
                self.verification_code()
                soup = BeautifulSoup(self.driver.page_source, 'lxml')

            # An empty-state marker means we walked past the last page.
            if soup.find(class_ = 'EmptyState'):
                break

            if not len(soup.select('.List .List-item')):
                return

            for item in soup.select('.List .List-item'):
                try:
                    self.crawl_post('https:' + item.find(class_ = 'ContentItem-title').find('a').get('href'))
                except Exception:
                    continue

            page += 1

            time.sleep(1.5)

        self.zhihu_user_coll.update_one({'link': url}, {'$set': {'crawled': True}})


def start(users):
    """Worker-process entry point: crawl the given slice of user documents."""
    ZHUANLAN(users)


if __name__ == '__main__':
    # Number of worker processes; the user list is split evenly among them.
    # (Was a magic `1` duplicated in the step division and the range below.)
    worker_count = 1

    pool = []

    client = MongoClient(net, 27017)

    zhihu_user_db = client['zhihu_user_db']
    zhihu_user_coll = zhihu_user_db['zhihu_user_coll']

    # Collect only the users that have not been crawled yet.
    users = [item for item in zhihu_user_coll.find() if 'crawled' not in item]

    # Ceiling division so every user lands in exactly one worker's slice.
    step = math.ceil(len(users) / worker_count)

    for i in range(worker_count):
        p = Process(target = start, args = (users[i * step: (i + 1) * step],))
        pool.append(p)

    for p in pool:
        p.start()

    for p in pool:
        p.join()
