# Scrape company listings and contact data from Tubatu (to8to.com).

import requests
from lxml import etree
from random import choice
import random
import time


# Pool of desktop browser User-Agent strings used to disguise the scraper
# as a regular browser. NOTE(review): the pool is sampled once at import
# time (see the `headers` dict below), not once per request.
ua_list = [
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it-IT) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36',
    'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0',
    'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0',
    'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F',
    'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
]

# HTTP headers shared by every request made in this script.
# NOTE(review): random.choice() runs exactly once, at import time, so the
# SAME User-Agent is reused for all requests — the pool above does not
# rotate per request. Rotating would require building the headers inside
# hqlj/hqhm instead; confirm whether rotation was intended.
headers = {
    'User-Agent': random.choice(ua_list)
}

def hqlj(dq,num):
    url=f"https://{dq}.to8to.com/company/list_{num}.html"
    try:
        html=requests.get(url,headers=headers)
        #print(html)
        html=html.text
        #print(html)
        con=etree.HTML(html)
        li=con.xpath('//li[@class="company-data "]/a/@href')
        #print(li)
        names=con.xpath('//p[@class="company__name"]/span/text()')
        #print(names)
        for name in names:
            name=name.strip('\n')
            #print(name)
            with open('gsm.txt','a+',encoding='utf-8') as f:
                f.write(f'{name}\n')

        return li
    except:
        print(f"获取{url}列表失败！请检查网络或者是否网页打不开！")
        pass
    time.sleep(5)

def hqhm(url):
    """Fetch a company detail page and record its contact information.

    Args:
        url: Absolute URL of the company detail page.

    Returns:
        The phone text scraped from the page, the placeholder '--空--\n'
        when the page has no phone block, or None when the request or
        parsing fails.

    Side effects:
        Appends "url,name,address,phone" to 'lxfs.txt' and sleeps 5s
        after a successful save to throttle requests.
    """
    print(url)
    try:
        # timeout so a dead server cannot hang the scraper forever
        resp = requests.get(url, headers=headers, timeout=10)
        con = etree.HTML(resp.text)
        tel = con.xpath('//div[@class="head-com-tel shopTreasure pg-block-show pg-block-click"]/text()')
        # Company name lives at the same place in both layouts; hoisted
        # out of the branches (the original duplicated this xpath).
        gsmc = con.xpath('//span[@class="com-name"]/text()')
        if not tel:
            div = "--空--\n"
            dz = con.xpath('//p[@class="address"]/text()')
        else:
            # The phone text is usually the second text node of the div;
            # guard the index so a single-node div cannot raise IndexError
            # (previously masked by a bare except with a misleading message).
            div = tel[1] if len(tel) > 1 else tel[0]
            dz = con.xpath('//p[@class="address shopTreasure"]/text()')
        if not gsmc or not dz:
            # Layout changed or page is empty: skip this URL without
            # crashing on gsmc[0]/dz[0].
            print(f"访问{url}网页失败！请检查网络或者是否网页打不开！")
            return None
        print(div)
        print(gsmc)
        print(dz)
        with open('lxfs.txt', 'a+', encoding='utf-8') as f:
            f.write(f'{url},{gsmc[0]},{dz[0]},{div}')
        print(f"保存{gsmc[0]}数据成功！")
        time.sleep(5)  # throttle: be polite to the server between pages
        return div
    except Exception:
        print(f"访问{url}网页失败！请检查网络或者是否网页打不开！")
        return None



if __name__ == '__main__':
    # Region code becomes the to8to subdomain, e.g. 'sz' -> sz.to8to.com.
    dq = input("请输入要爬取的地区字母（比如深圳为sz）：")
    ym = input("请输入要爬取的页码数：")
    print("开始爬取列表，请稍后......")
    ym = int(ym)  # ValueError on non-numeric input, same as before
    for i in range(1, ym + 1):
        ljs = hqlj(dq, i)
        # hqlj may return None (or an empty list) when a page fails;
        # guard so one bad page cannot TypeError the whole run.
        for url in ljs or []:
            hqhm(url)