# -*- coding: utf-8 -*-
# @Time    : 2021/01/07 23:09
# @Author  : Albert.Wang
# @File    : QCC_spider_nologin.py
# @Software: VSCode

import requests,json,re,time,multiprocessing
from requests.exceptions import RequestException
from urllib.parse import urlencode
from pyquery import PyQuery as pq
from multiprocessing import Pool
from config import *


# HTTP request headers sent with every request.
# NOTE(review): the cookie embeds a hard-coded QCC session (QCCSESSID, acw_tc,
# analytics ids) captured at authoring time -- sessions like this expire, so it
# will almost certainly need refreshing from a live browser session before the
# spider works again; confirm against the site.
headers = {
        "cookie":"acw_tc=b7f0d71516100176028661449eb9d97ddf12a339cdddfa875bd697d06c; QCCSESSID=ttci0f8pjufmb46gia0vvl6hi4; UM_distinctid=176dc8701069-0f663172ec2b67-5a301e44-1fa400-176dc87010781c; CNZZDATA1254842228=2026673054-1610016355-%7C1610016355; zg_did=%7B%22did%22%3A%20%22176dc870118316-0cd183b16ee126-5a301e44-1fa400-176dc87011a461%22%7D; hasShow=1; _uab_collina=161001760447347687100094; zg_de1d1a35bfa24ce29bbf2c7eb17e6c4f=%7B%22sid%22%3A%201610017603870%2C%22updated%22%3A%201610017767648%2C%22info%22%3A%201610017603874%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22cuid%22%3A%20%22af37a801a87f0779f8d1d14c666390c6%22%7D",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"
        }

# Fetch the search-results (index) page.
def get_page_index(keyword):
    """Fetch the qcc.com search-results page for *keyword*.

    Returns the response body as text on HTTP 200, otherwise None
    (including on any request error).
    """
    data = {'key': keyword}
    url = 'https://www.qcc.com/web/search?' + urlencode(data)
    try:
        # A timeout keeps a stalled connection from hanging the spider forever.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print("请求索引页url:{0}出错".format(url))
        return None
    
# Parse the index (search results) page.
def parse_page_index(html):
    """Yield up to three result dicts from the search-results *html*.

    Each dict has:
        'detail_url' -- URL of the company's detail page
        'company'    -- plain company name, highlight tags stripped

    Yields nothing when *html* is falsy (e.g. the index request failed),
    instead of letting re.findall crash on None.
    """
    if not html:
        return
    # Capture only the detail-page URL and the company name from each
    # "maininfo" result row.
    pattern = re.compile('"maininfo".*?href="(.*?)".*?<em>(.*?)</span>', re.S)
    result = re.findall(pattern, html)
    # Keep only the first 3 results.
    for item in result[:3]:
        yield {
            'detail_url': item[0],
            # The captured name still carries <em> highlight tags, e.g.
            # "<em>广州</em>侨<em>合</em>建设有限公司" -- strip them to get
            # the plain company name.
            'company': re.sub(r'<em>|</em>', '', item[1]),
        }

# Fetch a company's detail page.
def get_page_detail(company, detailurl):
    """Fetch the detail page for *company* at *detailurl*.

    Returns the page text on HTTP 200, otherwise None
    (including on any request error).
    """
    url = detailurl
    print('开始爬取:{0},网址：{1}'.format(company, url))
    try:
        # A timeout keeps a stalled connection from hanging the spider forever.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print("请求详情页公司名{0}，url:{1}出错！".format(company, url))
        # Explicit None on failure, consistent with get_page_index.
        return None

# Parse a company's detail page.
def parse_page_detail(html, detailurl):
    """Extract contact fields from a detail page.

    Returns a dict with keys: url, company, state, phone,
    official_website, email, address (text of the matched element,
    empty string when the selector matches nothing).
    """
    doc = pq(html)
    # All fields live under the same page header container.
    base = '.container.p-t >#company-top > div.row > div.content'
    # Field name -> CSS selector for the element whose text we want.
    # NOTE(review): 'address' uses nearly the same selector as 'email'
    # (only span.fc differs) -- verify against the live page markup.
    selectors = {
        'company': base + ' > div.row.title.jk-tip > h1',
        'state': base + ' > div.row.tags > span.ntag.text-success.tooltip-br',
        'phone': base + ' > div.dcontent > div:nth-child(1) > span.fc > span.cvlu > span:nth-child(2)',
        'official_website': base + ' > div.dcontent > div:nth-child(1) > span.cvlu > a',
        'email': base + ' > div.dcontent > div:nth-child(2) > span.fc > span.cvlu > a:nth-child(1)',
        'address': base + ' > div.dcontent > div:nth-child(2) > span.cvlu > a:nth-child(1)',
    }
    # 'url' first so the serialized record leads with the source page.
    record = {'url': detailurl}
    for field, selector in selectors.items():
        record[field] = doc(selector).text()
    return record

def write_to_file(company, result, fn):
    """Append *result* as one UTF-8 JSON line to file *fn*.

    company: company name, used only for the console status message.
    result:  dict of scraped fields to serialize.
    fn:      path of the output file (opened in append mode).
    """
    with open(fn, 'a', encoding='utf-8') as f:
        f.write(json.dumps(result, ensure_ascii=False) + '\n')
    # Report success only after the write actually happened (the original
    # printed the message before writing, which was misleading on failure).
    print('{0}，保存成功'.format(company))

def main(KEYWORD):
    """Search qcc.com for KEYWORD and save up to three company details.

    Fetches the index page, then each result's detail page, appending
    every parsed record to a timestamped output file.
    """
    html = get_page_index(KEYWORD)
    if html is None:
        # Index request failed -- nothing to parse (the original would
        # crash inside parse_page_index on None).
        print('请求索引页失败，关键词：{0}'.format(KEYWORD))
        return
    # One timestamped output file per run.
    fn = 'result_' + time.strftime("%Y%m%d_%H%M%S", time.localtime()) + '.txt'
    for item in parse_page_index(html):
        # Fetch the detail page for this result.
        text = get_page_detail(item['company'], item['detail_url'])
        if text:
            result = parse_page_detail(text, item['detail_url'])
            write_to_file(item['company'], result, fn)
            #save_to_mongo(result)

if __name__ == "__main__":
    KEYWORD = '广州合道'  # search keyword
    # Only one task is ever submitted, so one worker process suffices
    # (a cpu_count-sized pool was wasted on a single apply_async call).
    pool = Pool(processes=1)
    # Keep the AsyncResult: without calling .get(), any exception raised
    # inside the worker is silently discarded.
    task = pool.apply_async(main, (KEYWORD,))
    pool.close()
    pool.join()
    task.get()  # re-raise any worker exception so failures are visible
    print('爬取结束！')