from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse
import jieba
import jieba.posseg as pseg

from selenium import webdriver
import numpy as np
import requests
import datetime
import pymongo
import random
import math
import codecs
import json
import pprint
import time
import ssl
import sys
import os
import re

# Shared MongoDB client. connect=False defers the actual socket connection
# until first use — important because this module-level client is inherited
# across fork() by the worker processes spawned in __main__, and PyMongo
# clients are not fork-safe once connected.
client = MongoClient('localhost', 27017, connect = False)
weibo_db = client['weibo_db']
# Destination collection for scraped records: {'mid', 'pic', 'text'}.
weibo_pic_text_coll = weibo_db['weibo_pic_text_coll']

# Source collection: blog articles whose nouns become search keywords.
# NOTE(review): the database name says 'zhihu' while the variables say
# 'chanpin100' — presumably historical naming; verify against the writer side.
chanpin100_bolg_db = client['zhihu_bolg_db']
chanpin100_blog_collection = chanpin100_bolg_db['chanpin100_blog_collection']

# Raised presumably because WEIBO.crawl() paginates via recursion; a very
# deep result set would otherwise hit the default recursion limit.
sys.setrecursionlimit(1000000)

# Browser-like headers for hitting the Weibo picture-search AJAX endpoint.
# NOTE(review): currently unused at runtime — the requests-based fetch that
# consumed them (inside WEIBO.crawl) is commented out in favor of Selenium.
headers = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Host': 's.weibo.com',
    'Pragma': 'no-cache',
    'Referer': 'http://s.weibo.com/pic/&Refer=pic_box',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}

from urllib import parse


class WEIBO():
    """Crawl Weibo picture-search results and store them in MongoDB.

    Constructing the object immediately:
      1. starts a headless Chrome driver,
      2. samples one random article from ``chanpin100_blog_collection``,
      3. extracts every noun from it with jieba POS tagging,
      4. crawls the Weibo picture search for each noun, inserting any
         previously-unseen {'mid', 'pic', 'text'} record into
         ``weibo_pic_text_coll``.
    """

    # Single source of truth for the chromedriver binary (the original
    # duplicated this literal in __init__ and in the restart path).
    CHROMEDRIVER_PATH = '/Users/xuchaosheng/Workspace/knx-scrapy/libs/chromedriver'

    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        options.add_argument('--disable-images')

        self.desired_capabilities = options.to_capabilities()
        self.driver = self._new_driver()

        self.cookie = dict()

        # One random article; words POS-tagged 'n' (nouns) by jieba become
        # the search keywords.
        random_article = list(chanpin100_blog_collection.aggregate([{'$sample': {'size': 1}}]))[0]['content']
        keywords = [word for word, flag in pseg.cut(random_article) if flag == 'n']

        for keyword in keywords:
            self.crawl(keyword, 0)

    def _new_driver(self):
        """Return a fresh headless Chrome driver instance."""
        return webdriver.Chrome(desired_capabilities = self.desired_capabilities,
                                executable_path = self.CHROMEDRIVER_PATH)

    def crawl(self, keyword, page):
        """Fetch picture-search result pages for ``keyword`` starting at
        ``page``, inserting unseen items into ``weibo_pic_text_coll``.

        Pagination is iterative. The original version recursed once per
        page (hence the module-level ``sys.setrecursionlimit`` hack) and
        could still exhaust the stack on long result sets or persistent
        parse failures.

        If a page cannot be parsed as JSON, the browser is restarted and
        crawling resumes from the next page.
        """
        while True:
            self.driver.get('http://s.weibo.com/ajax/pic/list?search=' + keyword + '&page=' + str(page))
            time.sleep(1)  # let the page settle / stay polite

            try:
                resp = json.loads(BeautifulSoup(self.driver.page_source, 'lxml').find('body').get_text())
            except Exception:
                # Recovery boundary: the body may be missing, non-JSON
                # (blocked / captcha / error page), or the driver itself may
                # have died. Restart the browser and try the next page.
                #
                # BUG FIX: the original called driver.close() AFTER
                # driver.quit(); quit() already terminates the session, so
                # close() raised inside the handler and aborted the crawl
                # before the replacement driver was ever created.
                try:
                    self.driver.quit()
                except Exception:
                    pass  # best effort — the driver may already be dead
                self.driver = self._new_driver()
                page += 1
                continue

            if not 'data' in resp or not 'pic_list' in resp['data']:
                return

            for item in resp['data']['pic_list']:
                # Dedupe by 'mid': skip records already stored.
                if not weibo_pic_text_coll.find_one({'mid': item['mid']}):
                    record = {
                        'mid': item['mid'],
                        'pic': 'http:' + item['url'],
                        'text': BeautifulSoup(item['text'], 'lxml').get_text().strip()
                    }
                    pprint.pprint(record)
                    weibo_pic_text_coll.insert_one(record)

            if resp['data']['is_end']:
                return
            time.sleep(1)
            page += 1


def start():
    """Worker-process entry point.

    Instantiating WEIBO kicks off the whole crawl in its constructor,
    so constructing it is the entire job of this function.
    """
    WEIBO()


if __name__ == '__main__':
    # Spin up 300 crawler processes, staggering launches by 100 ms so the
    # headless Chrome instances don't all start at once, then wait for
    # every worker to finish.
    workers = []

    for _ in range(300):
        time.sleep(0.1)
        worker = Process(target = start)
        worker.start()
        workers.append(worker)

    for worker in workers:
        worker.join()
