# encoding: utf-8
import datetime
from multiprocessing import Pool
from requests import adapters
import requests
from fake_useragent import UserAgent
import time
from time import sleep
import json
from lxml import html
from sql_ac.sql_1 import insert_one, select_all, insert_qwe, insert_date_no
import sys

sys.setrecursionlimit(1000000)
etree = html.etree

ua = UserAgent()
kv = {
    'user-agent': ua.random,
    "Connection": 'close',
    'cookie': '__wpkreporterwid_=4fa82d67-bbe8-4d5a-9611-ab32a55fb313; cna=cnkGGZFaLn0CAXFbPOPPTNea; ali_ab=113.91.60.227.1618989433749.5; UM_distinctid=178f34a79405c7-0f2bb0795f007a-31614c0c-1fa400-178f34a7941a42; taklid=feb875317ed8480a9213ffd1fae8caaa; _bl_uid=1Okjqnq0rp76an9d3kFjfed6Oqaa; ali_apache_id=11.186.201.1.1619146872717.397412.5; __last_loginid__=tb203626438; ali_beacon_id=113.91.60.69.1619164537746.342360.2; t=d4a03996bbc2e4a5dce4d03026e28a55; cookie2=1d38db29d6d7042419fc812cf4e35015; _tb_token_=e5b3abebe841b; ctoken=rF5BeiABHq4QMdIblDFapineneedle; __wapcsf__=1; xlly_s=1; keywordsHistory=%E7%82%92%E9%94%85%3B%E6%96%B0%E5%93%81%3B%E7%85%8E%E9%94%85%EF%BC%8C%E5%B9%B3%E5%BA%95%E9%94%85%3B%E5%B8%BD%E5%AD%90%3B%E6%97%B6%E4%BB%A4%E6%96%B0%E5%93%81%3B%E9%98%B2%E6%B0%B4%E9%9B%A8%E9%9E%8B%E5%A5%97%3B%E6%A3%89%E5%8D%95%E9%9D%A2%3B%E9%99%88%E7%A3%8A%3B%E6%B7%B1%E5%9C%B3%E5%B8%82%E5%8D%97%E5%B1%B1%E5%8C%BA%E4%BB%8A%E6%97%A5%E7%BE%8E%E6%9C%8D%E8%A3%85%E5%8E%82%3B%E5%8F%81%E9%BC%8E%E8%B4%B8%E6%98%93%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8; __cn_logon__=true; ali_apache_tracktmp=c_w_signed=Y; _m_h5_tk=205c492073050c4ad0567f22fa969955_1620213779566; _m_h5_tk_enc=714b7df3fc856cfa55ece0d16325d83f; alicnweb=touch_tb_at%3D1620204429214%7Clastlogonid%3Dtb704896168%7Cshow_inter_tips%3Dfalse; cookie1=UR3cMk3ye%2BEDtsZ1X%2FNahQu%2FsOnp1zVR%2Bc6WT02%2BIeE%3D; cookie17=UUpgRs0xZkRV4jrLfg%3D%3D; sg=49e; csg=4fa45516; lid=%E5%B0%8F%E5%BD%A9%E5%8D%B0test4; unb=2210999103899; uc4=id4=0%40U2gqyZ2pwcCxv5VDDdkqdEFhf74cSmbq&nk4=0%40sVZg6Yk46VYkcWtqIxqYlQ6TKsgtSw%3D%3D; __cn_logon_id__=%E5%B0%8F%E5%BD%A9%E5%8D%B0test4; ali_apache_track=c_mid=b2b-22109991038991109e|c_lid=%E5%B0%8F%E5%BD%A9%E5%8D%B0test4|c_ms=1|c_mt=3; _nk_=%5Cu5C0F%5Cu5F69%5Cu5370test4; last_mid=b2b-22109991038991109e; _csrf_token=1620204478730; _is_show_loginId_change_block_=b2b-22109991038991109e_false; _show_force_unbind_div_=b2b-22109991038991109e_false; _show_sys_unbind_div_=b2b-22109991038991109e_false; _show_user_unbind_div_=b2b-22109991038991109e_false; __rn_alert__=false; l=eBSIteC4jB0lSq1oBO5aPurza779UIRbzsPzaNbMiInca1udIQApPNCC3CNyxdtjgtfYwetrWDP1ndH9WlzU-AM67i_uARjSHxv9-; tfstk=cBEABugbl_f0mID8LrQlf_6LN-LOZwftNKGMXtwh4JhRl5tOidZ3vyghlAOx2TC..; isg=BGho0dMNnJkFLrDwA-Mg3vsdOVZ6kcyb6-OGnyKZZuO8fQrnyqN7KnL3dRWNzYRz'
}
PROXY_POOL_URL = 'http://localhost:5555/random'


def get_proxy():
    try:
        response = requests.get(PROXY_POOL_URL)
        if response.status_code == 200:
            return response.text
    except ConnectionError:
        return None


proxie = {
    'http': get_proxy()
}


def get_html(url):
    """
    请求网页
    :param url: url请求网址
    :return: 返回网页数据
    """
    for x in range(5):
        try:
            requests.adapters.DEFAULT_RETRIES = 20
            s = requests.session()
            s.keep_alive = False
            resp = requests.get(url, headers=kv, proxies=proxie, timeout=30)
            resp.close()
            if len(resp.text) < 1000:
                print(resp.text)
                print(url)
                print('该结束时间', datetime.datetime.now())
                input('qwe:')
                return get_html(url)
            else:
                return resp.text
        except Exception as e:
            print(e, '++0')
            sleep(15)
            return get_html(url)


def spider(url, k1,key_id):
    """
    提取网页json数据
    :param url: 网页数据
    :param k1: 关键词
    :param key_id: 关键词表id
    """
    dad = json.loads(url)
    for i in dad['data']['data']['offerList']:
        url = 'https://renders.1688.com/winport/page/archive.html?memberId=' + i['company'][
            'memberId'] + '&spm=a262gh.10415722.0.0'
        spider_data(get_html(url), i['company']['hoverName'], k1, i['company']['memberCreditUrl'], url,i['company']['memberId'],key_id)


def jj(e):
    """
    选取公司名
    :param e: xpath选取结果列表
    :return: 公司名
    """
    if len(e) >= 1:
        return e[0]
    else:
        return ''


def pp(e):
    """
    筛选手机号
    :param e: xpath结果列表
    :return: 手机号
    """
    l = ['', '']
    if len(e) != 0:
        for i in e:
            if len(i) == 11 and i[0] == '1' and i.find('-') == -1 and i.find('/') == -1:
                l[1] = i
            elif len(i) > 11 and i[-11] == '1' and i[-11:].find('-') == -1 and i[-11:].find('/') == -1:
                l[1] = i[-11:]
                l[0] = i
            else:
                l[0] = i
        return l
    return '1'


def spider_data(url, n, k1, k2, u,m,key_id):
    """
    取数据入库
    :param url:网页数据
    :param n:公司名
    :param k1:关键字
    :param k2:未抓取到信息的公司网页，以便2次深入爬取
    :param u:未抓取到信息的网页
    :param m:memberid
    :param key_id:关键词表id
    """
    soup = etree.HTML(url)
    name = soup.xpath('//em[contains(text(), "公司名称")]/following-sibling::*[1]/text()')
    zhuy = soup.xpath('//em[contains(text(), "主营产品")]/following-sibling::*[1]/text()')
    name_r = soup.xpath('//em/p[contains(text(), "系")]/../following-sibling::*[1]/text()')
    address = soup.xpath('//em[contains(text(), "联系地址")]/following-sibling::*[1]/text()')
    phone = soup.xpath("//div[@class='archive-sheet-item phone']/text()")
    try:
        if soup.xpath('//p[@class="archive-base-empty"]/text()')[0].find('暂未发布公司信息') != -1:
            print(u)
            insert_date_no(n, 0, k2,m)
    except Exception as e:
        p = pp(phone)
        print(p)
        if p != '1':
            insert_one(jj(name_r), p[0], p[1], k1, jj(name), jj(name), 1, '0', 1, '', '', '', '', '', '', 2,
                       k1, '', jj(address), '', '', '', '', '', '', 0, '', 0, '', 0, 0, 2, key_id, 0,
                       str(time.strftime('%Y-%m-%d %H:%M:%S')), str(time.strftime('%Y-%m-%d %H:%M:%S')), jj(zhuy), 0, 0,
                       '', 0)
        elif p == '1':
            insert_date_no(n, 1, k2,m)


def main(o, k, k1,key_id):
    """
    搜索列表获取memberid数据
    :param o: 页码
    :param k: 关键词url加密
    :param k1: 关键词
    :param key_id: 关键词表id
    """
    for j in range(0, 41, 20):
        url = 'https://search.1688.com/service/marketOfferResultViewService?keywords=' + k + '&n=y&netType=16&spm=a260k.dacugeneral.search.0' \
                                                                                             '&async=true&asyncCount=20&beginPage=' + str(
            o) + '&pageSize=60&startIndex=' + str(j) + '&pageName=major'
        spider(get_html(url), k1,key_id)


def tk(k, k1,key_id):
    """
    线程池
    :param k:关键词url加密
    :param k1:关键词
    :param key_id:关键词表id
    """
    pool = Pool(10)
    for i in range(1, 51):
        pool.apply_async(main, (i, k, k1,key_id))
    pool.close()
    pool.join()


if __name__ == '__main__':
    for j in select_all('merberid')[2674:]:
        endt = datetime.datetime.now()
        tk(j[2], j[1],j[0])
        print('********************************搞定一个啦，休息一下****************************************')
        endt1 = datetime.datetime.now()
        print('该关键词结束时间', endt1)
        print('该关键词爬取时间', endt1 - endt)
        sleep(30)
