import requests
from lxml import etree
import pymysql


# Shared HTTP request headers: spoof a desktop Chrome user-agent so the
# target site serves normal pages instead of blocking script traffic.
header = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36"
        }

# Save pet detail data to the database.
def saveDogDetail(dogObject):
    """Insert one ``dogDetail`` record into the ``dog_detail`` table.

    Args:
        dogObject: a ``dogDetail`` instance whose attributes map 1:1 onto
            the table columns.

    Raises:
        pymysql.MySQLError: on connection or execution failure; the
            connection is closed in all cases.
    """
    # NOTE(review): credentials are hard-coded in source — move to config/env.
    connection = pymysql.connect(
        host='47.94.156.155',  # database host
        port=3306,  # database port
        user='guagua',  # database user
        password='15912504206',  # database password
        db='guagua'  # database name
    )
    try:
        # Parameterized query: the old f-string interpolation broke on any
        # apostrophe in the scraped text and was open to SQL injection.
        sql = ("insert into dog_detail(varieties,name,alias,distribution_area,provenance,shape,"
               "height,weight,life_span,img_url,detail_url,price,type) "
               "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
        params = (
            dogObject.varieties, dogObject.name, dogObject.alias,
            dogObject.distributionArea, dogObject.provenance, dogObject.shape,
            dogObject.height, dogObject.weight, dogObject.lifeSpan,
            dogObject.imgUrl, dogObject.detailUrl, dogObject.price,
            dogObject.type,
        )
        print(sql)
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
        connection.commit()
    finally:
        # Original leaked the connection (only the cursor was closed).
        connection.close()

# Pet detail record.
class dogDetail():
    """Plain data holder for one pet's detail-page attributes.

    All fields are stored as the raw strings scraped from the page.
    """

    def __init__(self, varieties, name, alias, distributionArea, provenance, shape, height, weight, lifeSpan, imgUrl, detailUrl, price, type):
        self.varieties = varieties            # breed / variety
        self.name = name                      # Chinese name
        self.alias = alias                    # alias
        self.distributionArea = distributionArea  # distribution area
        self.provenance = provenance          # place of origin
        self.shape = shape                    # body size
        self.height = height                  # height
        self.weight = weight                  # weight
        self.lifeSpan = lifeSpan              # life span
        self.imgUrl = imgUrl                  # image URL
        self.detailUrl = detailUrl            # detail-page URL
        self.price = price                    # price range
        self.type = type                      # pet category

    def to_string(self):
        """Print a one-line, human-readable dump of every field."""
        print(f"种类:{self.varieties}"
              f";中文学名:{self.name}"
              f";别名:{self.alias}"
              f";分布区域:{self.distributionArea}"
              f";原产地:{self.provenance}"
              f";体型:{self.shape}"
              f";身高:{self.height}"
              f";体重:{self.weight}"
              f";寿命:{self.lifeSpan}"
              f";图片路径:{self.imgUrl}"
              f";详情路径:{self.detailUrl}"
              f";价格范围:{self.price}"
              f";品种:{self.type}")

# Fetch and persist one pet's detail page.
def getDogDetail(detailurl, imgUrl, typeIndex=9):
    """Download a detail page, parse its attributes and save them to the DB.

    Args:
        detailurl: absolute URL of the detail page.
        imgUrl: thumbnail URL taken from the listing page.
        typeIndex: index into the category table below. Defaults to 9
            ("另类动物"), matching the previous hard-coded behavior.
    """
    print("详情路径：" + detailurl)
    # Fetch the detail page.
    detailHtml = requests.get(detailurl, headers=header)
    # The site serves a legacy Chinese encoding; requests guesses wrong without this.
    detailHtml.encoding = 'gb2312'
    etreeDetailHtml = etree.HTML(detailHtml.text)
    varieties = ''.join(etreeDetailHtml.xpath('//*[@class="c1text1"]/h1/text()')).replace('[', '').replace(']', '').replace(
        "'", '')  # breed / variety
    price = ''.join(etreeDetailHtml.xpath('//*[@class="cankao"]/strong/text()'))  # price range
    detail = etreeDetailHtml.xpath('//*[@class="c1text3"]')
    dogDetailList = []
    for block in detail:
        for num in range(1, 9):
            # BUG FIX: ''.join() never returns None, so the old
            # `if data is None` branch was dead code — an empty xpath
            # match already yields '' and can be appended directly.
            data = ''.join(block.xpath(f'./li[{num}]/a/text()')).replace('[', '').replace(']', '').replace("'", '')
            dogDetailList.append(data)
    # Robustness: pad to 8 fields so a missing/changed page layout
    # no longer raises IndexError below.
    while len(dogDetailList) < 8:
        dogDetailList.append("")
    name = dogDetailList[0]  # Chinese name
    alias = dogDetailList[1]  # alias
    distributionArea = dogDetailList[2]  # distribution area
    provenance = dogDetailList[3]  # place of origin
    shape = dogDetailList[4]  # body size
    height = dogDetailList[5]  # height
    weight = dogDetailList[6]  # weight
    lifeSpan = dogDetailList[7]  # life span
    types = ["狗狗","猫猫","兔兔","虫宠","水族","宠物鼠","宠物貂","宠物鸟","两栖爬行","另类动物"]
    dogData = dogDetail(varieties, name, alias, distributionArea, provenance, shape, height,
                        weight, lifeSpan, imgUrl, detailurl, price, types[typeIndex])
    # Persist the record.
    saveDogDetail(dogData)

 # @Descript:获取狗狗品种列表信息
 # @Parma：总页码
def getDogList(pageSum):
    num = 0
    for pageSize in range(pageSum):
        # 狗狗 10
        # url = "https://www.ixiupet.com/ggpz/list_8_"+str(pageSize+1)+".html"
        # 猫猫 4
        # url = "https://www.ixiupet.com/mmpz/list_9_" + str(pageSize + 1) + ".html"
        # 兔子 2
        # url = "https://www.ixiupet.com/tzpz/list_10_" + str(pageSize + 1) + ".html"
        # 虫宠 10
        # url = "https://www.ixiupet.com/ccpz/list_11_"+ str(pageSize + 1) + ".html"
        # 水族 2
        # url = "https://www.ixiupet.com/szpz/list_12_"+ str(pageSize + 1) + ".html"
        # 宠物鼠 6
        # url = "https://www.ixiupet.com/cwspz/list_13_" + str(pageSize + 1) + ".html"
        # 宠物貂 2
        # url = "https://www.ixiupet.com/cwdpz/list_14_" + str(pageSize + 1) + ".html"
        # 宠物鸟 2
        # url = "https://www.ixiupet.com/cwnpz/list_15_" + str(pageSize + 1) + ".html"
        # 两栖爬行 3
        # url = "https://www.ixiupet.com/pxpz/list_16_" + str(pageSize + 1) + ".html"
        # 另类动物 2
        url = "https://www.ixiupet.com/llpz/list_17_" + str(pageSize + 1) + ".html"
        print("页面路径："+url)
        dogListHtml = requests.get(url=url, headers=header)
        dogListHtml.encoding = 'gb2312'
        # print(dogListHtml.text)
        etreeDogListHtml = etree.HTML(dogListHtml.text)
        a = etreeDogListHtml.xpath('//*[@class="tiyan-smll-li"]')
        for _ in a:
            detailUrl = ''.join(_.xpath('./@href')).replace('[','').replace(']','').replace("'",'')
            imgUrl = ''.join(_.xpath('./img/@src'))
            getDogDetail(detailUrl,imgUrl)
            num += 1
            print("爬取第"+str(num)+'条数据')
    print("爬取完第"+str(pageSize+1)+"页数据")


if __name__ == "__main__":
    # Entry point: crawl 2 listing pages of the currently selected category
    # (the url hard-coded inside getDogList).
    getDogList(2)
    # save_sql()

