import random
import time
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ee
from selenium.webdriver.support.wait import WebDriverWait
from multiprocessing import Pool
from tianyancha import const
from tianyancha.mongo import mongoDb

# Build PhantomJS capabilities with a custom User-Agent (taken from the
# project-level constants) so requests look like a normal browser.
cusHeaders = DesiredCapabilities.PHANTOMJS.copy()
cusHeaders['phantomjs.page.customHeaders.User-Agent'] = const.user_agent
# NOTE(review): this deliberately rebinds the imported `webdriver` module
# name to a driver *instance*; every function below relies on `webdriver`
# being the shared PhantomJS driver object, so do not rename it here alone.
webdriver = webdriver.PhantomJS(desired_capabilities=cusHeaders)
waitTime = 20     # explicit-wait timeout (seconds) passed to WebDriverWait
searchTime = 0.2  # WebDriverWait polling interval (seconds)
totalPages = []   # pages still to crawl; module-level shared state


def getIndusturyPages():
    """Build the list of search-result pages that still need crawling.

    For every industry code in the ``industry`` collection, generate the
    first 50 paginated search URLs and keep only those not already
    recorded in the ``spiderdPages`` collection.

    Returns:
        list[dict]: one ``{"page": url}`` dict per page left to crawl.
    """
    pages = []
    with mongoDb('industry') as industry:
        with mongoDb('spiderdPages') as spiderdPages:
            # PERF FIX: build a set once for O(1) membership tests; the
            # original list was scanned linearly for every candidate URL.
            sdPages = {page["page"] for page in spiderdPages.find()}
            for ind in industry.findOutList("secList", {}):
                for i in range(1, 51):
                    page = "http://www.tianyancha.com/search/oc{0}/p{1}".format(
                        ind["secInduCode"], i)
                    if page not in sdPages:
                        pages.append({"page": page})
    return pages


def getMainByPage(pageObj):
    """Scrape one search-result page and store each company's summary.

    Waits for the result items to render, extracts the summary fields of
    every company listed on the page, saves each to ``gys_mainInfo``, then
    records the page itself in ``spiderdPages`` so it is not re-crawled.

    Args:
        pageObj: dict of the form ``{"page": url}``.
    """
    page = pageObj["page"]
    print('entering the page::::::::::::::::::: %s' % page)
    webdriver.get(page)
    locate = (By.CSS_SELECTOR, "div.search_right_item")
    try:
        WebDriverWait(webdriver, waitTime, searchTime).until(ee.presence_of_element_located(locate))
        for item in webdriver.find_elements_by_css_selector('div.search_right_item'):
            href = item.find_element_by_css_selector('div.search_repadding2 > a').get_attribute('href')
            companyId = href.rsplit('/')[-1]
            companyName = item.find_element_by_css_selector('a.query_name span').text
            # The row texts look like "label： value"; keep everything after
            # the (full-width) colon.  NOTE(review): the first two splits use
            # "： " (colon + space) while the third uses "：" — presumably
            # matching the site's markup; confirm before normalizing.
            companyAgent = \
                item.find_element_by_css_selector('div.search_row_new div:nth-child(1)').text.split("： ")[-1]
            companyMoney = \
                item.find_element_by_css_selector('div.search_row_new div:nth-child(2)').text.split("： ")[-1]
            companyRegDate = \
                item.find_element_by_css_selector('div.search_row_new div:nth-child(3)').text.split("：")[-1]
            companyBase = item.find_element_by_css_selector('div.search_base.position-rel').text.split('\n')
            companyPosition = companyBase[0]
            companyScore = companyBase[1]
            companyIsLive = companyBase[3]
            companyInfo = {
                "companyId": companyId,
                "companyName": companyName,
                "href": href,
                "companyAgent": companyAgent,
                "companyMoney": companyMoney,
                "companyRegDate": companyRegDate,
                "companyPosition": companyPosition,
                "companyScore": companyScore,
                "companyIsLive": companyIsLive
            }
            saveOne("gys_mainInfo", companyInfo)
        saveOne("spiderdPages", pageObj)
        totalPages.remove(pageObj)
        # Randomized pause to avoid hammering the site.
        time.sleep(random.randrange(1, 10))
    except Exception as err:
        # BUG FIX: the original `except ee:` tried to catch the
        # expected_conditions *module*, which is not an exception class —
        # handling any failure would itself raise a TypeError.  Catch the
        # real exception and report it (best-effort: dump remaining pages).
        saveMany('noSpidersPages', totalPages)
        print('find this page :%s is failuer!!' % page)
        print(err)


def getGysContent(gysObj):
    """Crawl a company's detail page and attach the data to its record.

    Opens ``gysObj["href"]``, waits for the info panel to render, scrapes
    the contact/registration fields, then stores them under the
    ``content`` key of the matching ``gys_mainInfo`` document.

    Args:
        gysObj: a ``gys_mainInfo`` document; must contain ``href`` and
            ``companyId``.
    """
    page = gysObj["href"]
    print('{0}：is handlering :{1}'.format(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),page))
    webdriver.get(page)
    locator = (By.CSS_SELECTOR, 'div.company_info_text>div.ng-binding')
    try:
        WebDriverWait(webdriver, waitTime, searchTime).until(ee.presence_of_element_located(locator))
        companyId = page.rsplit('/')[-1]
        companyName = webdriver.find_element_by_css_selector('div.company_info_text>div.mb5').text
        # Each span reads "label: value"; keep the part after the colon.
        telPhone = \
            webdriver.find_element_by_css_selector('div.company_info_text > span:nth-child(4)').text.split(
                ": ")[
                -1]
        email = \
            webdriver.find_element_by_css_selector('div.company_info_text > span:nth-child(5)').text.split(
                ": ")[
                -1]
        address = \
            webdriver.find_element_by_css_selector('div.company_info_text > span:nth-child(9)').text.split(
                ": ")[
                -1]
        website = \
            webdriver.find_element_by_css_selector('span[ng-hide="company.websiteList"]').text.split(": ")[-1]

        containTable = webdriver.find_element_by_css_selector('div.company-content table')
        # Business registration number
        regNumber = containTable.find_element_by_css_selector('tr:nth-child(1) td:nth-child(1)').text.split(': ')[
            -1]
        # Organization code
        orgNumber = containTable.find_element_by_css_selector('tr:nth-child(1) td:nth-child(2)').text.split('：')[
            -1]
        # Unified social credit code
        creditCode = containTable.find_element_by_css_selector('tr:nth-child(2) td:nth-child(1)').text.split('：')[
            -1]
        # Company type, e.g. limited liability company (single legal-person owned)
        companyOrgType = \
            containTable.find_element_by_css_selector('tr:nth-child(2) td:nth-child(2)').text.split('：')[-1]
        # Industry, e.g. wholesale trade
        industry = containTable.find_element_by_css_selector('tr:nth-child(3) td:nth-child(1)').text.split('：')[-1]
        # Founding time, e.g. 1293638400000
        fromTime = containTable.find_element_by_css_selector('tr:nth-child(3) td:nth-child(2)').text.split('：')[-1]
        # Business scope, e.g. mineral products, iron ore concentrate
        businessScope = \
            containTable.find_element_by_css_selector('tr:nth-child(6) td:nth-child(1)').text.split('：')[-1]
        lawsuitCount = 0
        try:
            lawsuitCount = webdriver.find_element_by_css_selector('div#nav-main-lawsuitCount span.c9').text
        except Exception:
            # Lawsuit badge is optional; keep the 0 default when absent.
            pass
        companyContent = {
            "companyId": companyId,
            "companyName": companyName,
            "telPhone": telPhone,
            "email": email,
            "address": address,
            "website": website,
            "regNumber": regNumber,
            "orgNumber": orgNumber,
            "companyOrgType": companyOrgType,
            "industry": industry,
            "fromTime": fromTime,
            "businessScope": businessScope,
            "lawsuitCount": lawsuitCount
        }
        update_one("gys_mainInfo",{"companyId": gysObj["companyId"]}, companyContent)
        time.sleep(random.randrange(1, 10))
    except Exception as err:
        # BUG FIX: `except ee:` tried to catch the expected_conditions
        # *module* (not an exception class), which would raise a TypeError
        # at handling time instead of reporting the failed page.
        print('get getGysContent(self,%s) is fail' % page)
        print(err)


def saveMany(doc, list):
    """Bulk-insert the documents in *list* into collection *doc*.

    Failures are reported but swallowed so a bad insert never kills the
    crawl (best-effort persistence).
    """
    # NOTE(review): the parameter name `list` shadows the builtin; it is
    # kept unchanged so keyword callers are unaffected.
    try:
        with mongoDb(doc) as main:
            main.insert(list)
        print('insert data is ok!!!!!!!!!!')
    except Exception as err:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and report the actual reason.
        print('insert data is fail!!!!!!!!!!')
        print(err)


def saveOne(doc, item):
    """Insert a single document *item* into collection *doc*.

    Failures are reported but swallowed so a bad insert never kills the
    crawl (best-effort persistence).
    """
    try:
        with mongoDb(doc) as main:
            main.insert_one(item)
        print('insert data is ok!!!!!!!!!!')
    except Exception as err:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and report the actual reason.
        print('insert data is fail!!!!!!!!!')
        print(err)


def update_one(doc, filter, item):
    """Store *item* under the ``content`` key of the document matching *filter*.

    Used by the detail crawler to enrich ``gys_mainInfo`` records; the main
    loop selects documents with ``{"content": {"$exists": False}}``, so the
    data must land in a nested ``content`` field.
    """
    # NOTE(review): the parameter name `filter` shadows the builtin; it is
    # kept unchanged so keyword callers are unaffected.
    try:
        with mongoDb(doc) as main:
            # BUG FIX: pymongo's update_one requires an update-operator
            # document; the original `{"content": item}` (no `$` operator)
            # raises ValueError, so detail content was never saved and the
            # `{"content": {"$exists": False}}` query never shrank.
            main.update_one(filter, {"$set": {"content": item}})
        print('updateOne({0}, {1}, {2}) is success...................'.format(doc, filter, item))
    except Exception as err:
        print('updateOne({0}, {1}, {2}) is err...................'.format(doc, filter, item))
        print(err)


if __name__ == '__main__':
    # Phase 1 (currently disabled): crawl the per-industry search pages
    # to collect company summaries.
    # totalPages = getIndusturyPages()
    # pool = Pool(4)
    # for pageObj in totalPages:
    #     pool.apply_async(getMainByPage, (pageObj,))
    # pool.close()
    # pool.join()

    # Phase 2: enrich every company record that has no detail content yet.
    worker_pool = Pool(4)
    with mongoDb('gys_mainInfo') as main:
        for record in main.find({"content": {"$exists": False}}):
            worker_pool.apply_async(getGysContent, (record,))
        worker_pool.close()
        worker_pool.join()
