import csv
import re
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup

# (original note: "fetch HTML data" — describes getWebData below)
# Module-level accumulator: GetEssentialInformation appends one tuple per
# scraped company; the __main__ block writes them all to CSV at the end.
lists = []
def getWebData(url):
    """Download *url* and return the response body decoded as UTF-8.

    Sends browser-like headers because tianyancha.com rejects requests
    that do not look like a real logged-in browser session.
    NOTE(review): the Cookie value embeds a personal session/auth token
    with an expiry — it must be refreshed for the scraper to keep working.
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "host": "www.tianyancha.com",
        "Cookie": "jsid=SEO-BAIDU-ALL-SY-000001; TYCID=7b8db49062c011ec8c34992eb49aaffc; ssuid=7105849031; _ga=GA1.2.1905799692.1640133942; _gid=GA1.2.517367904.1640133942; _bl_uid=1Xk3OxyFg7qw09ah7vk9twO5w6g8; creditGuide=1; tyc-user-phone=%255B%252215007065107%2522%255D; aliyungf_tc=eb7cc98778343e7d0994fea89e643629997c39e2d5c2e49455bdeeede1db94bf; csrfToken=8vWbFW4i4kTbaMuZzGx4w1qi; bannerFlag=true; tyc-user-info={%22state%22:%220%22%2C%22vipManager%22:%220%22%2C%22mobile%22:%2215007065107%22}; tyc-user-info-save-time=1640227424372; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNTAwNzA2NTEwNyIsImlhdCI6MTY0MDIyNzQyNCwiZXhwIjoxNjcxNzYzNDI0fQ.42JUBONMeM5Jd-PI7f7EuG2kX66aUCZlGzbJFUlcFbnoT7ddTLavIH30UCvZ7BNu6SHs-XZaodKa_tOgI8ZEXQ; source_channel_code=baidu; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1640227481,1640234218,1640247361,1640273735; RTYCID=7123937b244340a5bcc66d470c4f7084; CT_TYCID=a964e578c77247d79f5be58175d1f700; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2215007065107%22%2C%22first_id%22%3A%2217ddf99d0e03d9-00a308e303790c-4c607a68-1327104-17ddf99d0e1e1b%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%2217ddf99d0e03d9-00a308e303790c-4c607a68-1327104-17ddf99d0e1e1b%22%7D; searchSessionId=1640320532.85960857; cloud_token=7e0585e2da6d4e1fb4eb9504d4c4b948; acw_tc=2f6fc12916403292739087434e52558a0ec7f7710b3c4f7f91ad34d09aceaa; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1640329284",
    }
    # FIX: ``urllib.request`` is now imported explicitly at the top of the
    # file — a bare ``import urllib`` does not guarantee the submodule is
    # loaded, so the original only worked via another library's side effect.
    request = urllib.request.Request(url=url, headers=headers)
    # FIX: close the HTTP response deterministically (the original leaked it).
    with urllib.request.urlopen(request) as response:
        return response.read().decode("utf-8")
# Pre-compiled patterns shared across functions.
# findLink: pulls the href out of a search-result <a class="name"> tag.
findLink = re.compile(r'.*<a class="name" href="(.*?)".*')
# findLink2: pulls the "enterprise type" (企业类型) cell from the detail table.
findLink2 = re.compile(r'.*<td>企业类型</td><td>(.*?)</td><td>.*')

# --- company-detail scraping -------------------------------------------------

def _first_match(bs, selector, pattern):
    """Return the first regex capture from the first element matched by
    *selector* in *bs*, or '-' when either the selector or the pattern
    finds nothing (page layout varies between companies)."""
    try:
        fragment = str(bs.select(selector)[0])
        return re.findall(pattern, fragment)[0]
    except Exception:
        return '-'


# CSS paths into tianyancha's company-detail page.  Brittle by nature:
# any site redesign breaks them.
_SEL_NAME = "body > div[class='mt74'] > div[class='container -top'] > div[class='tabline'] > div[class='tabline-right'] > div[class='container company-header-block'] > div[class='box -company-box'] > div[class='content'] > div[class='header'] > span[class='copy-info-box'] > span > h1[class='copy-it name info-need-copy _title']"
_SEL_BOSS = "body > div[class='mt74'] > div[class='container -top'] > div[class='tabline'] > div[class='tabline-right'] > div[class='container company-header-block'] > div[class='box -company-box'] > div[class='content'] > div[class='detail'] > div[class='f0 boss']"
_SEL_PHONE = "body > div[class='mt74'] > div[class='container -top'] > div[class='tabline'] > div[class='tabline-right'] > div[class='container company-header-block'] > div[class='box -company-box'] > div[class='content'] > div[class='detail'] > div[class='f0'] > div[class='in-block sup-ie-company-header-child-1 copy-info-box']"
_SEL_LINK = "body > div[class='mt74'] > div[class='container -top'] > div[class='tabline'] > div[class='tabline-right'] > div[class='container company-header-block'] > div[class='box -company-box'] > div[class='content'] > div[class='detail'] > div[class='f0 clearfix mb0 address'] > div[class='in-block sup-ie-company-header-child-1'] > a[class='company-link']"
_SEL_TABLE = "body > div[class='mt74'] > div[class='container -top'] > div[class='container'] > div[class='tabline'] > div[class='box-container -main'] > div[class='detail-list'] > div[class='block-data-group mt24'] > div[class='block-data'] > div[class='data-content'] > table[class='table -striped-col -breakall'] > tbody"
_SEL_ADDRESS = "body > div[class='mt74'] > div[class='container -top'] > div[class='tabline'] > div[class='tabline-right'] > div[class='container company-header-block'] > div[class='box -company-box'] > div[class='content'] > div[class='detail'] > div[class='f0 clearfix mb0 address'] > div[class='in-block sup-ie-company-header-child-2 copy-component-box'] > span[style='display:inline-block;'] > div[data-clipboard-action='copy'] > div[style='max-height:16px;'] > div[class='detail-content element-need-copy']"
_SEL_MONEY = "body > div[class='mt74'] > div[class='container -top'] > div[class='container'] > div[class='tabline'] > div[class='box-container -main'] > div[class='detail-list']"

# Field-extraction patterns, compiled once instead of per call.
_RE_NAME = re.compile(r'.*">(.*?)</h1>')
_RE_BOSS = re.compile(r'.*title="(.*?)">')
_RE_PHONE = re.compile(r'.*value="(.*?)"/><span class.*')
_RE_LINK = re.compile(r'.*href="(.*?)".*')
_RE_STAFF = re.compile(r'.*<td>人员规模</td><td>(.*?)</td></tr>.*')
_RE_ADDRESS = re.compile(r'.*element-need-copy">(.*?)\xa0\xa0</div>')
_RE_MONEY = re.compile(r'.*</td><td width=""><div title="(.*?)">.*')


def GetEssentialInformation(link):
    """Scrape one company detail page at *link* and append a row to the
    module-level ``lists``.

    The row tuple is (company name, legal representative, company link,
    staff-size range, phone number, enterprise type, address, registered
    capital); every field falls back to '-' when not found on the page.
    """
    html = getWebData(link)
    bs = BeautifulSoup(html, 'html.parser')

    comname = _first_match(bs, _SEL_NAME, _RE_NAME)          # company name
    name = _first_match(bs, _SEL_BOSS, _RE_BOSS)             # legal representative
    thenumber = _first_match(bs, _SEL_PHONE, _RE_PHONE)      # phone number
    Tlink = _first_match(bs, _SEL_LINK, _RE_LINK)            # company website link
    # FIX: the original indexed findall(...)[0] here with no try/except, so
    # any page lacking a staff-size row crashed the crawl with IndexError.
    numberfrom = _first_match(bs, _SEL_TABLE, _RE_STAFF)     # personnel size
    companytype = _first_match(bs, _SEL_TABLE, findLink2)    # enterprise type
    companyAddress = _first_match(bs, _SEL_ADDRESS, _RE_ADDRESS)
    money = _first_match(bs, _SEL_MONEY, _RE_MONEY)          # registered capital

    lists.append((comname, name, Tlink, numberfrom, thenumber, companytype, companyAddress, money))




# Collect company links from page n of the search results.
def GetLinks(n):
    """Return the company detail-page links found on page *n* of the
    tianyancha search results for keyword 旅行社 (travel agency,
    URL-encoded in the query string)."""
    theLink = "https://www.tianyancha.com/search/p" + str(n) + "?key=%E6%97%85%E8%A1%8C%E7%A4%BE"
    html = getWebData(theLink)
    bs = BeautifulSoup(html, "html.parser")
    t_list = bs.select("body > div[class='mt122'] > div[class='container -top'] > div[class='container-left'] > div[class='search-block header-block-container'] > div[class='result-list sv-search-container'] > div[class='search-item sv-search-company'] > div[class='search-result-single'] > div[class='content'] > div[class='header']")
    links = []
    for item in t_list:
        matches = re.findall(findLink, str(item))
        # FIX: the original did links.append(link[0]) unconditionally, so a
        # result card without a parsable href raised IndexError and killed
        # the whole run; skip such cards instead.
        if matches:
            links.append(matches[0])
    return links
def main():
    """Crawl search-result pages 1-5 and scrape every company found,
    accumulating rows in the module-level ``lists``."""
    count = 0
    for page in range(1, 6):
        for link in GetLinks(page):
            # Fetch and record this company's details.
            GetEssentialInformation(link)
            count += 1
            # 5 pages x ~20 results ≈ 100 companies, so the raw count
            # doubles as a rough percentage ("进度大约" = "progress approx").
            print("进度大约：", count, "%")
    print("完成")



if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Best-effort crawl: an anti-bot block or network failure partway
        # through must not lose the rows already collected — fall through
        # and write whatever was gathered.  ("完成" = "done")
        print('完成')
    # FIX: write the CSV through a context manager with an explicit UTF-8
    # encoding — the original left the file unclosed on a write error and
    # depended on the platform default encoding for the Chinese headers.
    with open("csv_test4.csv", "w", newline="", encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(('公司名', "法人姓名", '公司链接', "公司人数规模", '电话', "企业类型", "公司地址", "公司注册资金"))
        writer.writerows(lists)






















