import requests
from lxml import etree
import re
import csv

def getContent(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    On any request failure (network error or HTTP error status) an error
    message is printed and ``None`` is returned, matching the original
    best-effort behavior.

    :param url: page URL to download
    :return: decoded HTML string, or ``None`` on failure
    """
    agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'
    try:
        # The with-block guarantees the session (and its pooled
        # connections) is closed even if the request raises.
        with requests.session() as session:
            response = session.get(url=url, headers={"User-Agent": agent})
            response.raise_for_status()
            # NOTE: the page is decoded explicitly as UTF-8; the original
            # also set response.encoding = apparent_encoding, but that had
            # no effect on the manual decode and was removed.
            return response.content.decode("utf-8")
    except Exception:
        print("爬取错误")  # "crawl error"
        return None

#company name 公司名称
#company name 公司名称
def getCompany_Name(tree):
    """Extract company names from the parsed search-result tree.

    The matched ``<p>`` text nodes alternate between a label and the
    actual company name, so only the odd-indexed entries are kept.
    Embedded newlines are stripped from each name.

    :param tree: parsed document (anything exposing an ``xpath`` method)
    :return: list of company-name strings
    """
    texts = tree.xpath('//*[@id="utopia_widget_76"]/a/div/p/text()')
    # Odd indices hold the names; slicing replaces the original manual
    # parity counter.
    return [t.replace('\n', '') for t in texts[1::2]]

# Generic XPath helper.
# Returns the raw list produced by the query.
def common_Xpath(tree, xpath):
    """Evaluate *xpath* against *tree* and return the result list.

    The original version copied the results into a throwaway list
    (``common_Param``) that was never returned — dead code.  The xpath
    result is already a list, so it is returned directly.

    :param tree: parsed document (anything exposing an ``xpath`` method)
    :param xpath: XPath expression string
    :return: list of matched nodes/strings
    """
    return tree.xpath(xpath)

def parseContent(htmlContent):
    """Parse the search-result HTML and write rows to resource/zbjData.csv.

    Each CSV row holds: company name, seller price, transaction count,
    seller description.

    :param htmlContent: raw HTML string of the search-result page
    """
    # XPath expressions for price / transaction count / description.
    priceXpath = '//*[@id="utopia_widget_76"]/a[2]/div[2]/div[1]/span[1]/text()'
    barginXpath = '//*[@id="utopia_widget_76"]/a[2]/div[2]/div[1]/span[2]/text()'
    explainXpath = '// * [ @ id = "utopia_widget_76"] / a[2] / div[2] / div[2] / p/text()'
    tree = etree.HTML(htmlContent)
    company_Name = getCompany_Name(tree)
    price = common_Xpath(tree, priceXpath)
    bargin = common_Xpath(tree, barginXpath)
    explain = common_Xpath(tree, explainXpath)  # raw text, may need cleanup
    # newline='' is required by the csv module to avoid blank rows on
    # Windows; the with-block closes the file even if writing fails.
    with open('resource/zbjData.csv', mode='w', encoding="utf-8", newline='') as f:
        writer = csv.writer(f)
        # zip stops at the shortest list, so mismatched xpath result
        # lengths no longer raise IndexError as the manual counter could.
        for row in zip(company_Name, price, bargin, explain):
            writer.writerow(row)
    print("over!!")
if __name__ == '__main__':
    # Scrape the zbj.com (Chengdu) search results for "sass" sellers
    # and dump them to the CSV via parseContent.
    target_url = "https://chengdu.zbj.com/search/f/?kw=sass"
    page_html = getContent(target_url)
    parseContent(page_html)