
'''
Scrape bibliographic patent information from WIPO patentscope.
'''
import requests
import logging
from bs4 import BeautifulSoup
import json
from pymongo import MongoClient
import redis
import random
from multiprocessing.dummy import Pool, Manager
import os
import pickle
from pprint import pprint as pp
from util.proxy import PROXY
import time
import re

# WIPO application numbers queued for scraping (order preserved).
work_list = '''
10174313 11214072 11796867 12059218 12091034
12107168 12137795 12156384 12241321 12271818
12408792 12449443 12515382 12595423 12602906
12663123 12666879 12672433 12672923 12673143
12674603 12677023 12737163 12746712 12867845
12974232 12974235 12974237 12974239 12974241
12974243 12974244 12974245 12974247 12974249
12974250 12974251 12974252 12974254 12974257
12974258 12974259 12974260 12974261 12974262
12974263 12974264 12974265 12974266 12974267
12974268 12974269 12974270 12974271 12974273
12974277 12974280 12974282 12974283 12974284
12974285 12974287 12974291 12974292 12974295
12974299 12974300 12974303 12974304 12974305
12974306 12974307 12974308 12974310 12974312
12974314 12974315 12974316 12974317 12974318
12974322 12974323 12974324 12974326 12974329
12974331 12974332 12974333 12974334 12974335
12974336 12974337 12974338 12974341 12974342
'''.split()


# Field captions recognized on a WIPO detail page's bibliographic table;
# WIPO_PAGE only extracts cells whose caption appears in this tuple.
MAIN_SECTIONS = (
    "Alias", "Pub. No.:", "Publication Date:", "Application Date:",
    "Publication Number:", "Application Number:", "Title:",
    "Publication Kind :", "Grant Number:", "Grant Date:", "IPC:",
    "Applicants:", "Inventors:", "Agent:", "Priority Data:",
    "Publication Language:", "Filing Language:",
    "International Application No.:", "International Filing Date:",
    "Abstract:",
)


def get_recorder_logger(log_name):
    """Return a logger named *log_name* that logs to log/wipo/<log_name>.

    The logger gets two handlers (added only once, even if this is called
    repeatedly for the same name): a FileHandler at INFO level and a
    StreamHandler at DEBUG level, both using a timestamped format.

    :param log_name: logger name, also used as the log file name.
    :return: the configured logging.Logger instance.
    """
    my_logger = logging.getLogger(log_name)
    my_logger.setLevel(logging.DEBUG)
    if not my_logger.handlers:
        # logging format
        fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        # BUG FIX: FileHandler does not create missing directories; without
        # this, the first call raised FileNotFoundError on a fresh checkout.
        os.makedirs('log/wipo', exist_ok=True)
        # filehandler
        fh = logging.FileHandler('log/wipo/%s' % log_name)
        fh.setFormatter(fmt)
        fh.setLevel(logging.INFO)
        my_logger.addHandler(fh)
        # StreamHandler
        sh = logging.StreamHandler()
        sh.setFormatter(fmt)
        sh.setLevel(logging.DEBUG)
        my_logger.addHandler(sh)

    return my_logger


class FIND_SOUP():
    """Null-safe wrapper around a BeautifulSoup node.

    Every accessor tolerates a missing (None/empty) underlying node, so
    chained lookups like ``w.find(...).find(...).get_text()`` never raise.
    """

    def __init__(self, soup):
        self.soup = soup
        # Cache the stripped node text, or None when the node or its
        # .string is absent.
        self.string = soup.string.strip() if soup and soup.string else None

    def find(self, *args, **kw):
        """Wrapped find(); always returns a FIND_SOUP (possibly empty)."""
        node = self.soup.find(*args, **kw) if self.soup else None
        # Coerce falsy results (missing or empty tags) to an empty wrapper.
        return FIND_SOUP(node if node else None)

    def find_all(self, *args, **kw):
        """Wrapped find_all(); empty list when there is no node."""
        return self.soup.find_all(*args, **kw) if self.soup else []

    def find_next_sibling(self, *args, **kw):
        """Raw find_next_sibling() result, or None when there is no node."""
        return self.soup.find_next_sibling(*args, **kw) if self.soup else None

    def get_text(self):
        """Stripped text content of the node, or None when absent."""
        return self.soup.get_text().strip() if self.soup else None

    def get(self, *args, **kw):
        """Attribute lookup on the node, or None when absent."""
        return self.soup.get(*args, **kw) if self.soup else None


class BASE:
    """Shared helpers for scraped-page containers exposing ``ret_dict``."""

    @staticmethod
    def del_none(dic):
        """Remove every key whose value is falsy (None, '', 0, ...) in place."""
        # Collect keys first: the dict must not change size while iterating.
        for key in [k for k, v in dic.items() if not v]:
            del dic[key]

    def get_data(self):
        """Prune empty fields from ``self.ret_dict`` and return it."""
        BASE.del_none(self.ret_dict)
        return self.ret_dict


class WIPO_PAGE(BASE):

    def __init__(self, html):
        self.soup = BeautifulSoup(html, 'lxml')
        try:
            self.ret_dict = {}
            table = self.soup.find(
                "div", id='detailMainForm:NationalBiblio').find_all('table')[3]

            self._get_info_from_table(table, self.ret_dict)
        except Exception as e:
            MY_LOG.exception(e)
            self.ret_dict = {}
        # self.get_data()

    def _normalize_date_str(self, date_str):
        try:
            date_str = re.sub(r'(\d{2})\.(\d{2})\.(\d{4})',
                              r'\g<1>/\g<2>/\g<3>', date_str)
        except:
            pass

        if '\r\n' in date_str:
            return date_str.split('\r\n')

        return date_str

    def _get_info_from_table(self, table, data):

        for tr in table.find_all("tr"):
            tds = tr.find_all("td")

            if len(tds) < 2 or len(tds[0].text) < 4:
                continue

            inner_tabel = tds[0].find("table")
            if inner_tabel:
                self._get_info_from_table(inner_tabel, data)
                continue

            i = 0
            while len(tds) >= i + 2:
                caption = tds[i].text.strip()

                if caption not in MAIN_SECTIONS:
                    i += 1
                    continue

                if not tds[i + 1].text.strip() and len(tds) >= i + 3:
                    i += 1

                if caption == "IPC:":
                    tds[i + 1] = tds[i + 1].find("td")
                    if not tds[i + 1]:
                        i += 1
                        continue

                if tds[i + 1].find("span", {"lang": "en"}):
                    tds[i + 1] = tds[i + 1].find("span", {"lang": "en"})

                content = tds[i + 1].decode_contents(formatter=None)
                content = re.sub("\r?\n", "", content)
                content = content.replace(
                    "<br/>", "\r\n").replace("<BR/>", "\r\n")
                content = content.replace("<tr>", "\r\n")
                content = content.replace("&nbsp;", " ")
                content = re.sub("<[^>]*>", "", content).strip()

                data[caption.strip(' |:')] = self._normalize_date_str(content)
                i += 1




def get_complete_id(connection):
    """Return the set of 'Application Number' values already stored.

    :param connection: a pymongo collection (anything exposing a
        compatible ``find``); documents lacking the field contribute None.
    :return: set of application-number strings (possibly containing None).
    """
    cursor = connection.find({}, {'_id': 0, 'Application Number': 1})
    # BUG FIX: the original called ret.put(...) — sets have no .put(),
    # so this always raised AttributeError; accumulate with a set
    # comprehension instead.
    return {doc.get('Application Number') for doc in cursor}


def get_re_result(html):
    """Find the first US entry on a patentscope result-list page.

    Scans up to ten result rows (ids 0, 3, 6, ... 27); returns the
    detail-page href of the first row whose country column reads 'US',
    or None when the table/rows are missing or no US row exists.
    """
    soup = BeautifulSoup(html, 'lxml')
    if not soup:
        return None
    body = soup.find('tbody', id='resultTable:tb')
    if not body:
        return None
    for row_id in range(0, 30, 3):
        row = body.find('tr', id='resultTable:%s' % row_id)
        if not row:
            # Rows are numbered contiguously; a gap means we ran out.
            return None
        # The column id uses the row ordinal (row_id / 3 rendered as int).
        country = row.find(
            'td', id='resultTable:%1.0f:resultListTableColumnCtr' % (row_id / 3)).text
        if country == 'US':
            return row.find('a').get('href')

    return None


def get_page(que):
    """Worker loop: pull WIPO application ids from *que* and scrape them.

    Runs until it pops the sentinel string 'kill', which it re-enqueues so
    sibling pool workers also terminate.  For each id it (1) opens a fresh
    requests session and grabs the JSF ViewState token from the search
    page, (2) POSTs an FP:(id) search, (3) follows the first US result to
    the detail page, parses it with WIPO_PAGE and stores the dict in Mongo
    (DATA_TARGET).  Proxy rotation, retry counters and the per-failure
    loggers are module globals (USE_PROXY, LOG_500, HTML_LOG, ...).
    """
    while 1:

        work_ids = que.get()
        if work_ids == 'kill':
            # Re-enqueue the sentinel for the other workers, then exit.
            que.put('kill')
            return

        if not USE_PROXY:
            proxies = None
        else:
            proxy = PROXY()
            my_proxy = proxy.get_random_proxy()
            proxies = {'https': my_proxy, }

        first_url = 'http://patentscope.wipo.int/search/en/result.jsf'
        repeat_num = 0
        session_repeat = 0
        while 1:
            # Outer retry loop: one iteration == one fresh HTTP session.
            session_repeat += 1
            session = requests.Session()
            try:
                # Load the search form once to obtain the JSF ViewState
                # token required by the POST below.
                req = session.get(first_url, timeout=10, proxies=proxies)
                soup = BeautifulSoup(req.text, 'lxml')
                ViewState = soup.find(
                    "input", {"name": "javax.faces.ViewState"}).get("value")

                while 1:
                    # Inner retry loop: repeated searches on this session.
                    repeat_num += 1
                    data = {
                        "resultListForm": "resultListForm",
                        "resultListForm:goToPage": "1",
                        "resultListForm:refineSearchTop": 'FP:(%s)' % work_ids,
                        "resultListForm:commandRefineSearchTop": "Search",
                        "resultListForm:j_idt401": "workaround",
                        "javax.faces.ViewState": ViewState
                    }
                    try:
                        req = session.post(first_url, data=data, timeout=20, proxies=proxies)
                        if req.status_code == 200 and 'result.jsf' in req.url:
                            # Landed on the result list: follow the first
                            # US entry through to its detail page.
                            href = get_re_result(req.text)
                            if href:
                                req = session.get('http://patentscope.wipo.int/search/en/%s'%href)
                            else:
                                # No US row found; record the id for a re-run.
                                LOG_RELOG.error(work_ids)
                                continue

                        if req.status_code == 200 and 'detail.jsf?docId' in req.url:
                            wipo = WIPO_PAGE(req.text,)
                            ret_dict = wipo.get_data()
                            if not ret_dict:
                                print('empty,page %s' % work_ids)
                                continue
                            DATA_TARGET.insert_one(ret_dict)
                            print('%s has been storage' % work_ids)

                            # Success: fetch the next id reusing this session.
                            work_ids = que.get()
                            if work_ids == 'kill':
                                que.put('kill')
                                return
                            repeat_num = 0
                            session_repeat = 0
                            continue
                        if req.status_code == 500 and 'IPBlock.java' in req.text:
                            # Server-side IP block: rotate the proxy and
                            # break out to start a new session.
                            LOG_500.info(work_ids)
                            if USE_PROXY:
                                proxy.remove_proxy(my_proxy)
                                my_proxy = proxy.get_random_proxy()
                                proxies = {'https': my_proxy, }
                            break

                        # Any other status: log it and back off briefly.
                        HTML_LOG.info('%s : %s' % (work_ids, req.status_code))
                        time.sleep(10)
                    except Exception as e:
                        POST_LOG.error('%s : %s' % (work_ids, e))
                    if repeat_num > 3:
                        # Too many failed POSTs: abandon this session.
                        repeat_num = 0
                        break
            except Exception as e:
                SESSION_EXCE.error(e)

            if session_repeat > 2:
                # Several sessions failed in a row: rotate proxy, reset
                # counters, and keep retrying the current id.
                if USE_PROXY:
                    proxy.remove_proxy(my_proxy)
                    my_proxy = proxy.get_random_proxy()
                    proxies = {'https': my_proxy, }
                SESSION_LOG.info('session export')
                repeat_num = 0
                session_repeat = 0
                    # break


# Per-concern loggers; each writes to log/wipo/<name> and echoes to stderr.
HTML_LOG = get_recorder_logger('HTML_LOG')
UNKNOW_LOG = get_recorder_logger('unknow_log')
SESSION_LOG = get_recorder_logger('SESSION_LOG')
SESSION_EXCE = get_recorder_logger('SESSION_EXCE')
POST_LOG = get_recorder_logger('POST_LOG')
REPEAT_LOG = get_recorder_logger('repeat_log')
PROXY_LOG = get_recorder_logger('proxy_log')
LOG_RELOG = get_recorder_logger('LOG_RELOG')
LOG_500 = get_recorder_logger('LOG_500')
# Toggle routing requests through rotating proxies (see util.proxy.PROXY).
USE_PROXY = True

# Target Mongo collection for parsed detail pages.
DB = MongoClient('192.168.0.220', 27017)['wipo']
DATA_TARGET = DB['wipo_detail']
# Size of the thread pool created in main().
THREAD_NUM = 10
# NOTE(review): HEADERS is defined but never passed to any request in this
# file — confirm whether the scraper was meant to send this User-Agent.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'}


# def work(work_queue):

#     loop = asyncio.new_event_loop()
#     asyncio.set_event_loop(loop)
#     print('work')
#     tasks = [asyncio.ensure_future(get_page(work_queue))
#              for i in range(ASYNC_NUM)]
#     # tasks = [asyncio.ensure_future(test_work(work_queue)) for i in range(ASYNC_NUM)]
#     loop.run_until_complete(asyncio.wait(tasks))
#     loop.close()


def main():
    """Fan WIPO application ids out to a thread pool of scraper workers.

    Starts THREAD_NUM get_page() workers sharing one queue, skips ids
    already present in Mongo, enqueues the remainder plus the 'kill'
    sentinel, then waits for the pool to drain.
    """
    manager = Manager()
    task_queue = manager.Queue(1000)
    workers = Pool(THREAD_NUM)

    # Every worker loops on the same queue until it pops 'kill'.
    workers.map_async(get_page, [task_queue] * THREAD_NUM)

    # Ids already persisted in Mongo are skipped on this run.
    done_ids = {doc.get('Application Number')
                for doc in DATA_TARGET.find({}, {'_id': 0, 'Application Number': 1})}

    for work_id in work_list:
        if work_id not in done_ids:
            task_queue.put(work_id)
    task_queue.put('kill')

    workers.close()
    workers.join()
    print('end')


def test():
    # NOTE(review): get_work_list() is not defined anywhere in this file,
    # so calling test() raises NameError — confirm where it was meant to
    # come from before relying on this helper.
    ret = get_work_list()
    # pp(ret)
    print(len(ret))
    # print(json.dumps(company.get_data()))

if __name__ == '__main__':
    # Script entry point: run the threaded scraper until the queue drains.
    main()
    # main()
