# -*- coding: utf-8 -*-
# @Time    : 2018/9/7 11:31
import threading
import os
path = os.path.abspath(os.path.dirname(os.getcwd()))
import sys
sys.path.append(path)
import requests
import pymongo,re
import config,log
from random import randint
from hashlib import md5
from time import sleep
from collections import OrderedDict
from lxml.html import etree
from multiprocessing import Process, Queue, Pool
# Default HTTP headers sent with every request: a desktop Chrome UA plus a
# captured Bing session cookie.
headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.12 Safari/537.36',
           'cookie': 'DUP=Q=4k_aVkb7EB6cfKTAFc9DCQ2&T=344770957&A=2&IG=46D793435C5042C2B00A923163F2292F; SRCHD=AF=PORE; SRCHUID=V=2&GUID=E7EABF2B8B374394B8AAE5D7206F8A20&dmnchg=1; _EDGE_V=1; MUID=2152C4059A376D592CB8C8BE9B486C4B; SRCHUSR=DOB=20181204&T=1543916417000; MUIDB=2152C4059A376D592CB8C8BE9B486C4B; ipv6=hit=1543920020087&t=4; SRCHHPGUSR=CW=832&CH=753&DPR=1&UTC=480&WTS=63679513218; ENSEARCH=BENVER=1; _EDGE_S=mkt=zh-cn&F=1&SID=00A5831942A26642021A8FA243DD6794; _FP=hta=on; _SS=SID=00A5831942A26642021A8FA243DD6794&HV=1543916558',
}
# Mongo database holding the de-duplication collections, sharded by the
# first hex digit of a URL's md5 (url_0 .. url_f below).
db_million = config.db_million
url_0 = db_million['url_0']
url_1 = db_million['url_1']
url_2 = db_million['url_2']
url_3 = db_million['url_3']
url_4 = db_million['url_4']
url_5 = db_million['url_5']
url_6 = db_million['url_6']
url_7 = db_million['url_7']
url_8 = db_million['url_8']
url_9 = db_million['url_9']
url_a = db_million['url_a']
url_b = db_million['url_b']
url_c = db_million['url_c']
url_d = db_million['url_d']
url_e = db_million['url_e']
url_f = db_million['url_f']
# Dispatch table: first hex digit of a url md5 -> its shard collection
# (used by BiYing.quchong for duplicate checks).
url_s = {"0":url_0, "1":url_1, "2":url_2, "3":url_3, "4":url_4, "5":url_5, "6":url_6, "7":url_7, "8":url_8,
         "9":url_9, "a":url_a, "b":url_b, "c":url_c, "d":url_d, "e":url_e, "f":url_f}
# Destination collection for crawled Bing search results.
new_url_biying = config.new_li_db_biying
# Keyword queue collection; documents carry a 'word' and a 'state_biying' flag.
li_db_words = config.li_db_words
# Shared module-level logger for all worker processes.
log_biying = log.Log("biying")

class BiYing(object):
    """Bing (cn.bing.com) search-result crawler.

    Claims keywords from MongoDB, searches Bing for doc/pdf/ppt results
    for each keyword, and stores de-duplicated hits back into MongoDB.
    One worker process per proxy is spawned by ``proce``.
    """

    def __init__(self):
        # List of {"http": ip} proxy mappings loaded from MongoDB.
        self.pro = self.get_pro()

    def get_pro(self):
        """Load every proxy IP from the ``proxies_ip`` collection.

        Returns:
            list[dict]: one ``{"http": ip}`` mapping per stored proxy.
        """
        proxy_db = config.con_proxy
        proxy_col = proxy_db['proxies_ip']
        return [{"http": doc['ip']} for doc in proxy_col.find()]

    def find_cookie(self, proxies):
        """Open a session against cn.bing.com to obtain fresh cookies.

        The session is kept on ``self.ses`` and reused by ``find_one``.
        Exits the worker process if the homepage cannot be reached
        (a dead proxy makes the worker useless).
        """
        try:
            self.ses = requests.session()
            self.ses.get("https://cn.bing.com/", proxies=proxies,
                         headers=headers, timeout=360)
        except Exception as e:
            # FIX: was a bare ``except:`` that exited silently; log the
            # reason so a dead proxy is visible before the worker dies.
            log_biying.error("find_cookie failed: %s" % (e))
            sys.exit()

    def find_one(self, data, proxies, word_type):
        """Page through Bing results for ``data['q']`` and store each hit.

        Args:
            data: query-string dict for /search; ``first`` is rewritten
                per page (Bing pages by result offset: 1, 11, 21, ...).
            proxies: ``{"http": ip}`` mapping for this worker.
            word_type: file type being searched ('doc'/'pdf'/'ppt').
        """
        for x in range(0, 100):
            try:
                data['first'] = str(x * 10 + 1)
                sleep(0.1)  # small throttle between result pages
                req = self.ses.get('https://cn.bing.com/search?', params=data,
                                   proxies=proxies, headers=headers, timeout=360)
                req.close()
                if req.status_code == 200:
                    con_et = etree.HTML(req.text)
                    result_s = con_et.xpath('//ol//li[@class="b_algo"]')
                    res_len = len(result_s)
                    # Progress report every 30 pages.
                    if (x + 1) % 30 == 0:
                        log_biying.info("page: %s  keyword:%s " % (str(x + 1), data['q']))
                        print("jiansuo_url len(): %s  page: %s  keyword:%s " % (res_len, str(x + 1), data['q']))
                    if res_len == 0 and x == 0:
                        # No results at all for this keyword.
                        log_biying.error("this not find :%s" % (data['q']))
                    for xiabiao, nr in enumerate(result_s):
                        try:
                            href = nr.xpath('.//h2/a/@href')[0]
                            update_logo = nr.xpath('.//div[@class="inner"]/a[@class="sb_fav"]')
                            title_s = nr.xpath('.//h2/a//text()')
                            title = "".join(title_s)
                            content_s = nr.xpath('.//div[@class="b_caption"]/p//text()')
                            summary = self.repl("".join(content_s))
                            print(title)
                            try:
                                # Only results carrying an "sb_fav" element
                                # are stored; the IndexError raised below
                                # intentionally skips results without it.
                                logo = update_logo[0]
                                self.insert_mongo_biying(href, title, summary, word_type, data['q'])
                                if xiabiao == (res_len - 1) and (x + 1) % 30 == 0:
                                    print("charu :%s" % (title))
                            except Exception as e:
                                if xiabiao == (res_len - 1) and (x + 1) % 30 == 0:
                                    print(e)
                        except Exception as e:
                            # Malformed result node: skip it, keep the page.
                            print(e)
                            continue
                    if x >= 1 and res_len < 10:
                        # A short page past the first means no more results.
                        return
                else:
                    if req.status_code == 404:
                        continue
                    print("error status_code:%s " % (str(req.status_code)))
            except Exception as e:
                print(e)

    def insert_mongo_biying(self, url, title, summary, word_type, q):
        """Insert one search hit, de-duplicating on the URL's md5.

        Args:
            url: result link. title/summary: extracted page text.
            word_type: file type ('doc'/'pdf'/'ppt'). q: the full query.
        """
        url_md5 = self.md5_generator(url)
        num = self.quchong(url_md5)
        if num != 0:
            # Already known in the sharded url collections.
            return
        save = OrderedDict()
        save['url'] = url
        save['title'] = title
        save['summary'] = summary
        save['state'] = 0  # crawl state flag
        save['url_md5'] = url_md5
        save['type'] = word_type  # file type of the search
        save['q'] = q
        try:
            new_url_biying.insert(save)
        except Exception as e:
            if str(e).find("E11000") > -1:
                # Duplicate key: fetch the existing document.
                nr = new_url_biying.find_one({"url_md5": url_md5})
                # If it is missing fields (written by an older run with
                # fewer than the 8 fields above), re-save it with the full
                # schema while preserving its state and _id.
                if len(nr) != 8:
                    save['state'] = nr['state']
                    save['_id'] = nr['_id']
                    try:
                        # Replace the document in place (same _id).
                        new_url_biying.save(save)
                    except Exception as err:
                        # FIX: renamed so it no longer shadows the outer e.
                        print(err)
                return
            print("插入失败：%s" % (e))

    def repl(self, text):
        """Strip newline/tab/CR and wide-space characters from *text*.

        Returns *text* unchanged when it is not a string.
        """
        try:
            new_text = re.sub(r"[\n\t\r\u3000\xa0\u2002]", "", text).strip()
            return new_text
        except Exception:
            return text

    def delete_mongo(self, proxies):
        """Worker loop: claim keywords and crawl Bing for each file type.

        Runs until the keyword collection has no more unclaimed words.
        """
        print("stare : %s  :%s" % (os.getpid(), proxies))
        # Hit the Bing homepage first to obtain session cookies.
        self.find_cookie(proxies)
        log_biying.info("stare : %s   :%s" % (os.getpid(), proxies))
        # Template query string; 'q'/'pq'/'first' are rewritten per search.
        data = {
            'q': 'filetype:pdf α反义寡核苷酸 ',
            'qs': 'n',
            'sp': '-1',
            'pq': 'filetype:pdf α反义寡核苷酸 ',
            'sc': '1-19',
            'sk': '',
            'cvid': 'C90A160D0DE54E7693EEEF48C9E5F007',
            'first': '1',
            'FORM': 'PORE',
        }
        while True:
            try:
                # Atomically claim one unprocessed keyword.
                # FIX: previously ``{'$set': {'state_biying': 0}}`` wrote
                # back the same value, so the identical word was re-claimed
                # forever and the worker never advanced; marking it 1 lets
                # the loop walk through the whole collection.
                item = li_db_words.find_and_modify({'state_biying': 0},
                                                   {'$set': {"state_biying": 1}})
                if not item:
                    # No unclaimed words left: worker is done.
                    return
                name = item['word']
                for word in ['doc', 'pdf', 'ppt']:
                    data['q'] = 'filetype:%s %s' % (word, name)
                    data['pq'] = 'filetype:%s %s' % (word, name)
                    self.find_one(data, proxies, word)
            except Exception as e:
                print(e)

    def md5_generator(self, url):
        """Return the hex md5 digest of *url*."""
        return md5(url.encode()).hexdigest()

    def proce(self):
        """Spawn crawler worker processes (currently one, on the first proxy)
        and wait for them all to finish."""
        trader = []
        for i in range(1):
            proxies = self.pro[i]
            pr = Process(target=self.delete_mongo, args=(proxies,))
            sleep(0.5)
            pr.start()
            trader.append(pr)
        for i in trader:
            i.join()
        print('proce this is pid: %s' % os.getpid())

    def quchong(self, url_md5):
        """Count existing documents with this md5 in the shard collections.

        The collection is picked by the first hex digit of the md5.
        NOTE(review): this queries field "md5" while insert_mongo_biying
        writes "url_md5" (into a different collection) — confirm the
        url_0..url_f collections really key on "md5".
        """
        return url_s["%s" % (url_md5[0])].find({"md5": url_md5}).count()



if __name__ == '__main__':
    # Entry point: build the crawler and spawn its worker processes.
    crawler = BiYing()
    crawler.proce()


