import json
import requests
from time import sleep
from redis import ConnectionPool, StrictRedis
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
import os
from hashlib import md5
from config import devDataDir
import re
from urllib3 import encode_multipart_formdata
import base64


class SimpleHash(object):
    """A seeded multiplicative string hash, masked down to a capacity.

    Used as one of several hash functions for the Redis bloom filter;
    ``cap`` is expected to be a power of two so ``cap - 1`` acts as a
    bitmask selecting the bit offset.
    """

    def __init__(self, cap, seed):
        self.cap = cap
        self.seed = seed

    def hash(self, value):
        """Return the bit offset for *value* within a bitmap of size cap."""
        acc = 0
        for ch in value:
            acc += self.seed * acc + ord(ch)
        return (self.cap - 1) & acc

class RedisClient(object):
    """Shared Redis-backed state for the crawler.

    Provides a bloom filter for URL de-duplication (bitmaps keyed by
    ``key + shard``) plus several simple work queues/keys:
    ``docname`` (downloaded file names), ``docurl`` (pending document URLs),
    ``pre_docurl`` (related-document URLs), ``download``, ``current``
    (pagination cursor) and ``cookies`` (serialized session cookies).
    """

    # NOTE(review): connection credentials are hardcoded here; they should
    # live in config alongside devDataDir.
    pool = ConnectionPool(**{
        'host': '39.105.230.139',
        'port': 6379,
        'db': 5,
        'password': 950218
    })
    client = StrictRedis(connection_pool=pool)

    def __init__(self, blockNum=1, key='bloomfilter'):
        """blockNum: number of Redis bitmaps the filter is sharded over.
        key: key prefix for the bitmap(s)."""
        self.bit_size = 1 << 31  # bits per Redis bitmap shard
        self.seeds = [5, 7, 11, 13, 31, 37, 61]
        self.key = key
        self.blockNum = blockNum
        self.hashfunc = [SimpleHash(self.bit_size, seed) for seed in self.seeds]

    def isContains(self, str_input):
        """Return truthy if *str_input* has (probably) been seen before.

        Falsy/empty input counts as not contained. The input is reduced to
        its md5 hexdigest first so all hash functions work on a
        fixed-length hex string.
        """
        if not str_input:
            return False
        m5 = md5()
        m5.update(str_input.encode('utf-8'))
        str_input = m5.hexdigest()
        # First two hex chars of the digest select the bitmap shard.
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        # Short-circuit: the element is definitely absent as soon as any
        # of its bits is unset (the original always probed all 7 bits).
        for f in self.hashfunc:
            if not self.client.getbit(name, f.hash(str_input)):
                return False
        return True

    def insert(self, str_input):
        """Mark *str_input* as seen by setting all of its filter bits."""
        m5 = md5()
        m5.update(str_input.encode('utf-8'))
        str_input = m5.hexdigest()
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        for f in self.hashfunc:
            self.client.setbit(name, f.hash(str_input), 1)

    def push_doc_name(self, name):
        """Queue a downloaded document's file name."""
        self.client.rpush('docname', name)

    def get_doc_name(self):
        """Pop the oldest queued document name, or None when empty."""
        doc_name = self.client.lpop('docname')
        if doc_name:
            return str(doc_name, encoding='utf-8')
        return None

    def push_download_doc(self, name):
        self.client.rpush('download', name)

    def push_url(self, url):
        """Queue a document URL for readInfo to process."""
        self.client.rpush('docurl', url)

    def pop_url(self):
        """Pop the oldest queued document URL, or None when empty.

        Fixed: previously raised TypeError (str(None, ...)) on an empty
        queue; now consistent with get_doc_name/pop_pre_url.
        """
        url = self.client.lpop('docurl')
        if url:
            return str(url, encoding='utf-8')
        return None

    def pop_pre_url(self):
        """Pop the oldest related-document URL, or None when empty."""
        pre_docurl = self.client.lpop('pre_docurl')
        if pre_docurl:
            return str(pre_docurl, encoding='utf-8')
        return None

    def set_current_page(self, page):
        """Persist the pagination cursor (a full listing-page URL)."""
        self.client.set('current', page)

    def get_current_page(self):
        """Return the pagination cursor.

        NOTE(review): raises TypeError if 'current' was never set — the
        cursor must be seeded (e.g. with starturl) before crawling.
        """
        url = self.client.get('current')
        return str(url, encoding='utf-8')

    def set_cookies(self, cookies):
        """Persist the JSON-serialized Selenium cookies."""
        self.client.set('cookies', cookies)

    def get_cookies(self):
        """Return the JSON-serialized cookies as a str."""
        return str(self.client.get('cookies'), encoding='utf-8')


class IXueShu(object):
    """Crawler for www.ixueshu.com.

    Workflow: ``GetUrlList`` walks the paginated category listings and
    feeds the Redis URL queue (deduplicated via the bloom filter);
    ``login`` obtains session cookies through Selenium; ``readInfo`` pops
    one document URL, scrapes its metadata, queues related documents, and
    downloads the PDF when full text is available.
    """

    starturl = 'https://www.ixueshu.com/clc/1.html?sort=downloads%20desc&page=1'
    url = 'https://www.ixueshu.com/document/2e1c32172b661dd898beecc80c47c671318947a18e7f9386.html'
    r = RedisClient()

    def GetUrlList(self):
        """Scrape the listing page stored under Redis key 'current', queue
        every unseen document URL, then advance the pagination cursor.

        Follows the "next page" link until page 50, then jumps to the next
        category (``clc/2X``) restarting at page 1.
        """
        currentPage = self.r.get_current_page()
        currentPageIndex = currentPage.split('page=')[1]
        resp = requests.get(currentPage)
        tree = etree.HTML(resp.content)
        documentList = tree.xpath('/html/body/div[6]/div[3]/div[2]/ul/li')
        # a[13] is the "next page" anchor in the pager; empty on the last page.
        next_url = tree.xpath('/html/body/div[6]/div[3]/div[2]/div/a[13]/@href')
        for document in documentList:
            url = document.xpath('./div[@class="doc_intro"]/div[@class="doc_title"]/a/@href')[0]
            if not self.r.isContains(url):
                self.r.insert(url)
                self.r.push_url('https://www.ixueshu.com' + url)
        if len(next_url) != 0 and currentPageIndex != '50':
            self.r.set_current_page('https://www.ixueshu.com' + next_url[0])
        else:
            # Category exhausted (or page cap hit): bump the digit after
            # ".../clc/2" and restart that category at page 1.
            index = currentPage.split('https://www.ixueshu.com/clc/2')[1][0]
            next_page = ('https://www.ixueshu.com/clc/2' + str(int(index) + 1)
                         + '.html?sort=downloads%20desc' + '&page=1')
            self.r.set_current_page(next_page)

    def login(self):
        """Log in via Selenium and persist the session cookies to Redis."""
        chrome_options = Options()
        driver = webdriver.Chrome(options=chrome_options)
        driver.set_window_size(1920, 1080)
        try:
            driver.get('https://www.ixueshu.com')
            driver.implicitly_wait(10)
            driver.find_element_by_class_name('login_switch').click()
            driver.implicitly_wait(1)
            # NOTE(review): account credentials are hardcoded; move to config.
            driver.find_element_by_id('username').send_keys('17793288202')
            driver.find_element_by_id('password').send_keys('xueshu2008')
            driver.find_element_by_css_selector('.login_btn>input').click()
            jsonCookies = json.dumps(driver.get_cookies())
            self.r.set_cookies(jsonCookies)
        except Exception as e:
            print(e)
        finally:
            # Always release the browser (the original duplicated quit()
            # in both the success and failure paths).
            driver.quit()

    def readInfo(self):
        """Process one queued document URL: scrape title/author/journal,
        queue related-document URLs, and download the PDF when available.

        Best-effort: any failure is printed and swallowed so the polling
        loop in __main__ keeps running.
        """
        try:
            url = self.r.pop_url()
            response = requests.get(url)
            tree = etree.HTML(response.content)
            print('获取论文信息...')
            title = tree.xpath('//*[@id="preview"]/div[2]/div[1]/h1/text()')[0]
            print(title)
            author = tree.xpath('//*[@id="preview"]/div[2]/div[1]/div[1]/span[1]/text()')[0]
            book = tree.xpath('//*[@id="preview"]/div[2]/div[1]/div[1]/span[2]/text()')[0]
            # The 16-digit docId is embedded in an inline script on the page.
            # Raw string + capture group replaces the fragile split/replace
            # (the non-raw '\d' was also a DeprecationWarning in py3.6+).
            pattern = re.compile(r'var docId = "(\d{16})"')
            docId = pattern.search(str(response.content, encoding='utf-8')).group(1)
            # The relate-pc endpoint requires a base64 "sign" of docId + salt.
            sign = str(base64.b64encode((docId + '_xs!@#123').encode('utf-8')), encoding='utf-8')
            response = requests.get(
                'https://www.ixueshu.com/document/search/relate-pc?docId=' + docId + '&size=80&sign=' + sign)
            docList = json.loads(response.content)
            pipe = self.r.client.pipeline()
            for doc in docList['body']:
                pipe.rpush('pre_docurl', doc['url'])
            response = requests.get(
                'https://www.ixueshu.com/query/similarity/journal.html?docId=' + docId + '&journal=' + book + '&size=80')
            docList = json.loads(response.content)
            for doc in docList['data']:
                pipe.rpush('pre_docurl', doc['url'])
            pipe.execute()
            # Titles containing "(无全文)" have no downloadable full text.
            if '(无全文)' not in title:
                docDir = devDataDir + title + '#' + author + '#' + book + '.pdf'
                cookies = json.loads(self.r.get_cookies())
                cookieDict = {cookie['name']: cookie['value'] for cookie in cookies}
                response = requests.get("https://www.ixueshu.com/file/" + docId + ".html", cookies=cookieDict)
                downloadUrl = json.loads(response.content)['message']
                # Stream the PDF to disk in chunks; the original passed
                # stream=True but then read .content, buffering everything.
                file = requests.get(downloadUrl, stream=True)
                with open(docDir, 'wb') as f:
                    for chunk in file.iter_content(chunk_size=8192):
                        f.write(chunk)
                self.r.push_doc_name(title + '#' + author + '#' + book + '.pdf')
                # TODO: optionally upload via post_files and remove the
                # local copy once the upload endpoint is in use.
        except Exception as e:
            print(e)

    def post_files(self, header=None, data=None, filename="", filepath=""):
        """Upload a local file as multipart/form-data to the samereport API.

        Fixed: the first parameter used to be named ``url`` but, since this
        is an instance method, it actually received ``self`` and was never
        used as a URL; it is now named correctly.
        """
        if data is None:
            data = {}
        if header is None:
            header = {}
        # Read the file inside a context manager so the handle is closed
        # (the original open(...).read() leaked the file object).
        with open(filepath, 'rb') as f:
            data['file'] = (filename, f.read())
        encode_data = encode_multipart_formdata(data)
        header['Content-Type'] = encode_data[1]
        r = requests.post('http://api.samereport.com/v1/admin/cnki/upload',
                          headers=header, data=encode_data[0])
        print(r.content)


if __name__ == '__main__':
    # Poll the Redis URL queue forever, handling one document per cycle;
    # readInfo swallows its own errors, so the loop never dies.
    crawler = IXueShu()
    while True:
        crawler.readInfo()
        sleep(5)
