#! /usr/bin/env python3
# -*- coding: utf-8 -*-

#######################
# 天眼查全国企业信息查询系统
# 2016-07-27
#######################
import json
import math
import os
import re
import sys
import time
import urllib
import urllib.parse

import pymysql
from bs4 import BeautifulSoup
from selenium import webdriver

dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config") 
os.sys.path.append(dirs)   # add the ../Config directory to sys.path so the shared config loads
# os.sys.path.append("D:/job/crawler/Config")
from config import conn,driver,logpath

# reload(sys)  
# sys.setdefaultencoding('utf-8')   
cur = conn.cursor()
#获取企业基本信息数据
def get_enterprise_data(ename):
   # conn = pymysql.connect(
   #  host='localhost',
  # port = 3306,
  # user='root',
  # passwd='root',
  # db ='crawler',
  # charset="utf8"
  #   )
   # cur = conn.cursor()
   name = cur.execute("select basic_info_list from enterprise")
   name = cur.fetchall()
   userid = cur.execute("select uid from court")
   userid = cur.fetchall()
   useridm = cur.execute("select uid from copyright")
   useridm = cur.fetchall()
   useridk = cur.execute("select uid from patent")
   useridk = cur.fetchall()

   ee = []
   for e in range(0,len(name)):
    ee.append(name[e][0])
   oo = []
   for o in range(0,len(userid)):
    oo.append(userid[o][0])
   mm = []
   kk = []
   for m in range(0,len(useridm)):
    mm.append(useridm[m][0])
   for k in range(0,len(useridk)):
    kk.append(useridk[k][0])

    #网站地址
   keyword = urllib.parse.quote(ename)
   url = 'http://www.tianyancha.com/search/'+keyword
   #获得搜索结果页面
   # driver = webdriver.PhantomJS(executable_path='D:\python\python3.4\Scripts\Phantomjs.exe')
   driver.maximize_window()
   driver.get(url)
   time.sleep(2)
   # 判断是否搜索到内容
   soups = BeautifulSoup(driver.page_source,"html.parser")
   query_name = soups.find('a',class_="query_name")
   if query_name:
       #从搜索结果中点击第一个结果
    driver.find_element_by_class_name('query_name').click()
    now = driver.window_handles
    driver.switch_to_window(driver.window_handles[1])
    time.sleep(3)
    #抓取页面
    soup = BeautifulSoup(driver.page_source,"html.parser")

    scode = soup.find('td',class_="td-score").find('img')
    # 评分
    score = scode.attrs["ng-alt"]

    basic_info_list = soup.find('div',class_="company_info_text").find('p').get_text().strip()
    basic_info_lists = "'"+basic_info_list+"'"
    dates =  "'"+soup.find('td',class_="td-regTime-value").get_text()+"'"
    regCapital = "'"+soup.find('td',class_="td-regCapital-value").get_text()+"'"
    regStatus = "'"+soup.find('td',class_="td-regStatus-value").get_text().strip()+"'"
    legalPersonName =  "'"+soup.find('td',class_="td-legalPersonName-value").get_text()+"'"
    c8 = soup.find_all('div',class_="c8")
    staff_name = soup.find_all('a',{"event-name":"company-detail-staff"})
    staff_position = soup.find_all('span',{"ng-repeat":"join in s.typeJoin track by $index"})
    investment = soup.find_all('div',{"ng-repeat":"investor in company.investorList track by $index"})
    tender = soup.find_all('div',{"ng-repeat":"bid in bidingData.companyBidList track by $index"})
    outinvestment = soup.find_all('a',{"event-name":"company-detail-out-investment"})
    cases = soup.find_all('div',{"ng-repeat":"lawSuit in company.lawsuitObj.items track by $index"})
    court =  soup.find_all('div',{"ng-repeat":"data in company.Cour|limitTo:5:courIndex*5"})
    copyright = soup.find_all('div',{"ng-repeat":"co in copyRight.copyrightRegList"})
    patent = soup.find_all('div',{"ng-repeat":"data in patent.patentList"})
    # print(court)
    # sys.exit(0)

    # 案件诉讼
    ww = []
    if cases != []:
     #案件诉讼总数
     cases_sum = int(soup.find('div',id="nav-main-lawSuit").find('span').get_text().strip())
     # 案件诉讼页数
     cases_list = math.ceil(cases_sum/20)
     if cases_list > 5:
      cases_list = 5;
     # cases_list = 100
     cases_num = 0
     while(cases_num<cases_list):
       cases_soup = BeautifulSoup(driver.page_source,"html.parser")
       cases = cases_soup.find_all('div',{"ng-repeat":"lawSuit in company.lawsuitObj.items track by $index"})
       for w in cases:
          ww.append(w.get_text().strip())

       if(cases_list!=1):
         driver.find_element_by_css_selector("div[ng-if=\"company.lawSuitTotal>0\"]").find_element_by_class_name('pagination-next').find_element_by_class_name('ng-binding').click()
       time.sleep(2)
       cases_num = cases_num+1
       if cases_num>=cases_list:
         break
    else:
          cases = soup.find_all('div',{"ng-repeat":"lawSuit in company.lawsuitObj.items track by $index"})
    
    # 法院公告
    ff = []
    if court != []:
     #法院公告总数
     court_sum = int(soup.find('div',id="nav-main-court").find('span').get_text().strip())
     # 法院公告页数
     court_list = math.ceil(court_sum/3)
     if court_list>5:
      court_list = 5
     # court_list = 5
     court_num = 0
     while(court_num<court_list):
       court_soup = BeautifulSoup(driver.page_source,"html.parser")
       court =  court_soup.find_all('div',{"ng-repeat":"data in company.Cour|limitTo:5:courIndex*5"})
       for f in court:
          ff.append(f.get_text().strip().split("："))
       if(court_list!=1):
         driver.find_element_by_css_selector("div[ng-if=\"company.Cour.length>0\"]").find_element_by_class_name('pagination-next').find_element_by_class_name('ng-binding').click()
       time.sleep(2)
       court_num = court_num+1
       if court_num>=court_list:
         break
    else:
        court =  soup.find_all('div',{"ng-repeat":"data in company.Cour|limitTo:5:courIndex*5"})

    # 著作权
    ll = []
    if copyright != []:
     # 著作权总数
     copyright_sum = int(soup.find('div',id="nav-main-copyright").find("span").get_text().strip())
     # 著作权页数
     copyright_list = math.ceil(copyright_sum/5)
     if copyright_list > 5:
      copyright_list = 5
     copyright_num = 0
     while(copyright_num<copyright_list):
       copyright_soup = BeautifulSoup(driver.page_source,"html.parser")
       copyright = copyright_soup.find_all('div',{"ng-repeat":"co in copyRight.copyrightRegList"})
       for l in copyright:
         ll.append(l.get_text().strip().split("\n"))
       if(copyright_list !=1):
         driver.find_element_by_id('nav-main-copyright').find_element_by_class_name('pagination-next').find_element_by_class_name('ng-binding').click()
       time.sleep(2)
       print(copyright_num)
       copyright_num = copyright_num+1
       if copyright_num>=copyright_list:
         break
    else:
      copyright = soup.find_all('tr',{"ng-repeat":"co in copyRight.copyrightRegList"})

    # 专利总数
    pp = []
    if patent != []:
     patent_sum = int(soup.find('div',id="nav-main-patent").find('span').get_text().strip())
     print(patent_sum)
     # 专利页数
     patent_list = math.ceil(patent_sum/5)
     if patent_list > 5:
      patent_list = 5
     print(patent_list)
     patent_num = 0
     while(patent_num<patent_list):
       patent_soup = BeautifulSoup(driver.page_source,"html.parser")
       patent = patent_soup.find_all('div',{"ng-repeat":"data in patent.patentList"})
       for p in patent:
         pp.append(p.get_text().strip().split("："))
       if(patent_list !=1):
         driver.find_element_by_id('nav-main-patent').find_element_by_class_name('pagination-next').find_element_by_tag_name("a").click()
       time.sleep(2)
       print(patent_num)
       patent_num = patent_num+1
       if patent_num>=patent_list:
         break
    else:
      patent = soup.find_all('div',{"ng-repeat":"data in patent.patentList"})

    data = []
    cc = []
    ii = []
    yy = []
    uu = []
    nn = []
    qq = []

    # 法定代表人
    # for i in legalPersonName:
    #    data.append(i.get_text().strip())
    # 注册时间
    # for i in date:
    #    data.append(i.get_text().strip())
    # 注册资本
    # for i in regCapital:
    #    data.append(i.get_text().strip())
    #状态
    # for i in regStatus:
    #    data.append(i.get_text().strip())
    # qiyemingcheng = driver.title.split('】')[1].split('信息查询')[0]
    # data.append(qiyemingcheng)
    # for i in basic_info_list:
    #    data.append(i.get_text().strip())
    # 行业,工商注册号,企业类型,组织机构代码,营业期限,登记机关,核准日期,统一信用代码,注册地址,经营范围
    for j in c8:
       cc.append("'"+j.get_text().strip()+"'")
    # 主要任职人员姓名
    for n in staff_name:
       nn.append(n.get_text().strip())
    staffnames = json.dumps(nn, ensure_ascii=False)
    # 主要任职人员职位
    for u in staff_position:
       uu.append(u.get_text().strip())
    staffpositions = json.dumps(uu, ensure_ascii=False)
     # 股东信息
    for s in investment:
       ii.append(s.get_text().strip())
    investments = "'"+json.dumps(ii, ensure_ascii=False)+"'"
     # 对外资产
    for y in outinvestment:
       yy.append(y.get_text().strip())
    outinvestments = json.dumps(yy, ensure_ascii=False)

    # 案件诉讼
    casess = json.dumps(ww, ensure_ascii=False)

    # 招标信息
    for q in tender:
       qq.append(q.get_text().strip())
    tenders = json.dumps(qq, ensure_ascii=False)
    lenc = len(cc)
    while (lenc <= 11):
        cc.append("NULL")
        lenc = lenc+1
    uid = cur.execute("select eid from enterpriseinfo where ename like '%"+ename+"%'")
    uid = cur.fetchone()
    uid = uid[0]
    uids = str(uid)
    if basic_info_list in ee:
        cur.execute("update enterprise set Registrationtime="+dates+",regCapital="+regCapital+",regStatus="+regStatus+",legalPersonName="+legalPersonName+",industry="+cc[1]+",registrationnumber="+cc[2]+",enterprisetype="+cc[3]+",Organizationcode="+cc[4]+\
            ",Businessterm="+cc[5]+",Registrationauthority="+cc[6]+",approvaldate="+cc[7]+",Unifiedcreditcode="+cc[8]+",registeredaddress="+cc[9]+",Scopebusiness="+cc[10]+",investment="+investments+"where basic_info_list ="+basic_info_lists)
    else:
        cur.execute("INSERT INTO enterprise (id,basic_info_list,Registrationtime,regCapital,regStatus,legalPersonName,industry,registrationnumber,enterprisetype,Organizationcode,Businessterm,Registrationauthority,approvaldate,Unifiedcreditcode,registeredaddress,Scopebusiness,investment,outinvestment,staffpositions,staffnames,casess,tenders)"+\
                "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(uid,basic_info_list,dates,regCapital,regStatus,legalPersonName,cc[1],cc[2],cc[3],cc[4],cc[5],cc[6],cc[7],cc[8],cc[9],cc[10],investments,outinvestments,staffpositions,staffnames,casess,tenders))

    # cur.connection.commit()
    # uid = cur.execute("select id from enterprise where basic_info_list ="+basic_info_lists)
    # uid = cur.execute("select eid from enterpriseinfo where ename like %"+basic_info_lists+"%")
    # uid = cur.fetchone()
    # uid = uid[0]
    # uids = str(uid)

    # 法院公告
    if basic_info_list in ee:
     leng = len(ff)
     sum = 0
     if uid in oo:
      cur.execute("delete from court where uid="+uids)
     while (sum < leng):
          cur.execute("INSERT INTO court (uid,appeal,bvprosecution,cours,announcementtype) VALUES(%s,%s,%s,%s,%s)",(uid,ff[sum][1],ff[sum][2],ff[sum][3],ff[sum][4]))
          sum = sum+1
          if sum >= leng:
              break
    else:
     leng = len(ff)
     sum = 0
     while (sum < leng):
         cur.execute("INSERT INTO court (uid,appeal,bvprosecution,cours,announcementtype) VALUES(%s,%s,%s,%s,%s)",(uid,ff[sum][1],ff[sum][2],ff[sum][3],ff[sum][4]))
         sum = sum+1
         if sum >= leng:
             break

    # 专利信息
    if basic_info_list in ee:
     lengh = len(pp)
     sums = 0
     if uid in kk:
      cur.execute("delete from patent where uid="+uids)
     while (sums < lengh):
          cur.execute("INSERT INTO patent (uid,patentname,applyrelease,applynumber,applydate,classification) VALUES(%s,%s,%s,%s,%s,%s)",(uid,pp[sums][0],pp[sums][1],pp[sums][2],pp[sums][3],pp[sums][4]))
          sums = sums+1
          if sums >= lengh:
              break
    else:
        lengh = len(pp)
        sums = 0
        while (sums < lengh):
          cur.execute("INSERT INTO patent (uid,patentname,applyrelease,applynumber,applydate,classification) VALUES(%s,%s,%s,%s,%s,%s)",(uid,pp[sums][0],pp[sums][1],pp[sums][2],pp[sums][3],pp[sums][4]))
          sums = sums+1
          if sums >= lengh:
             break

    # 著作权
    if basic_info_list in ee:
     lenght = len(ll)
     sumes = 0
     if uid in mm:
      cur.execute("delete from copyright where uid="+uids)
     while (sumes < lenght):
          cur.execute("INSERT INTO copyright (uid,softwarename,softwareas,registrationnum,classificationnum,versionnum,copyrightowner,firstdate,registrationdate) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)",(uid,ll[sumes][0],ll[sumes][1],ll[sumes][2],ll[sumes][3],ll[sumes][4],ll[sumes][5],ll[sumes][6],ll[sumes][7]))
          sumes = sumes+1
          if sumes >= lenght:
              break
    else:
        lenght = len(ll)
        sumes = 0
        while (sumes < lenght):
            cur.execute("INSERT INTO copyright (uid,softwarename,softwareas,registrationnum,classificationnum,versionnum,copyrightowner,firstdate,registrationdate) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)",(uid,ll[sumes][0],ll[sumes][1],ll[sumes][2],ll[sumes][3],ll[sumes][4],ll[sumes][5],ll[sumes][6],ll[sumes][7]))
            sumes = sumes+1
            if sumes >= lenght:
                break
    return data

# 执行
if "php" in sys.argv:
  lists = [urllib.request.unquote(sys.argv[1])]
else:
  #企业列表
  #判断是否已经存在数据
  sqls = "select searchname from enterpriseinfo where status = 1 and searchname is not null"
  cur.execute(sqls)
  #企业列表
  lists = []
  result_list = cur.fetchall()
  for result in result_list:
    lists.append(result[0])

  if lists == None:
    # lists = ["拓尔思","安硕信息","方正科技","东软集团","中兴通讯","中国神华","华能国际","广发证券","万科","宝钢","山煤","韶钢"]
    lists = ['华为','联想电脑集团','小米','渤海征信','大公国际信用评级集团有限公司','东软集团','中国神华能源','华能国际电力股份有限公司','宜信公司','万科集团','联想','宝钢集团']

listslenght = len(lists)
nums = 0
while (nums < listslenght):
   get_enterprise_data(lists[nums])
   # print(lists[nums])
   nums = nums+1
   if nums >= listslenght:
        print('over')
        print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
        break
# data = get_enterprise_data('易霖')
cur.close()
conn.close()