# -*- coding: utf-8 -*-


from lxml import etree
from fake_useragent import UserAgent
import time
import random
import os
import requests
import shutil
import base64
import re
from requests.auth import HTTPProxyAuth


requests.packages.urllib3.disable_warnings()  # suppress HTTPS InsecureRequestWarning noise

# Abuyun dynamic HTTP tunnel proxy endpoint
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel credentials.
# NOTE(review): credentials are hardcoded in source; consider loading them
# from environment variables or a config file instead of committing them.
proxyUser = "H01I644F9J45ODID"
proxyPass = "C61D2EF7EE18A03A"


# Crawl the listing index pages and scrape every listing found on them.
def get_url():
    """Crawl listing index pages 1-9 on sh.58.com.

    For every listing on an index page this function:
      * fetches the listing's detail page,
      * downloads its gallery images as ``1.jpg``, ``2.jpg``, ...,
      * appends title/company/address/contact/description to
        ``<company>.txt``,
      * moves the text file and images into a directory named after the
        listing title.

    Returns early (aborting the whole crawl) as soon as a captcha page is
    detected. All HTTP traffic goes through get_request(), which handles
    proxying and retries. Files are written to the current working directory
    before being moved.
    """
    for page in range(1, 10):
        print("======当前页:" + str(page) + "==========")
        # NOTE: the original statement ended with a stray "\" line
        # continuation that spliced the commented-out test URL below into
        # this assignment; the continuation has been removed.
        url = f"https://sh.58.com/zhuce/pn{page}/?PGTID=0d30007d-0000-2313-57d7-fda8f706972d&ClickID=3"

        html = get_request(url)
        print("=======已获取首页========")
        selector = etree.HTML(html.text)

        # Captcha check: a <p class="title"> whose text mentions "验证码"
        # means the crawler was blocked — stop the run.
        captcha_nodes = selector.xpath('//p[@class="title"]')
        if captcha_nodes:
            yzm = captcha_nodes[0].text
            # Guard: an empty <p> has text None, which the original
            # yzm.find(...) would crash on with AttributeError.
            if yzm is not None and yzm.find("验证码") >= 0:
                print(yzm)
                return

        contents = selector.xpath('//*[@class="tdiv"]')
        for c in contents:
            childUrl = c.xpath('./a/@href')[0]
            title = "".join(c.xpath('./a//text()')).replace("\n", "").replace("\t", "").replace("\r", "") \
                .replace(" ", "").replace("\\", "")
            # xpath(...)[0] already yields a str; the original wrapped it in
            # a redundant "".join() — dropped.
            desc = c.xpath('./div[@class="item-desc"]/text()')[0].replace("\n", "").replace("\t", "")
            company = "".join(c.xpath('./p[@class="seller"]//text()')).replace("\n", "").replace("\t", "").replace("\r", "") \
                .replace(" ", "").replace("\\", "")

            html1 = get_request(childUrl)
            print("=======已获取【分页】========")
            selector1 = etree.HTML(html1.text)

            # Contact person ("联系人")
            lsr = selector1.xpath('//div[@class="infocard__container__item infocard__container__item--contact"]/div[@class="infocard__container__item__main"]/text()')
            lsr = "".join(lsr).replace("\n", '').replace(" ", "").replace("\r", "").replace("\t", '')
            # Shop address: linked part plus any plain-text remainder.
            address = selector1.xpath('//div[@class="infocard__container__item infocard__container__item--shopaddress"]/'
                                      'div/a/text()')
            address += selector1.xpath('//div[@class="infocard__container__item infocard__container__item--shopaddress"]/'
                                       'div[@class="infocard__container__item__main"]/text()')
            address = "".join(address).replace("\n", '').replace(" ", "").replace("\r", "").replace("\t", '')
            desc1 = selector1.xpath('//article[@class="description_con"]/'
                                    'text()')
            imgs = selector1.xpath('//*[@id="img_player1"]/ul/li/span/img/@src')
            print(title)
            print(company)

            # Create the per-listing directory; exist_ok=True replaces the
            # original bare "except: pass" around os.mkdir(), which also
            # swallowed unrelated OS errors.
            os.makedirs(title, exist_ok=True)

            print("=======下载图片=======")
            picNum = 1
            for u in imgs:
                # Scheme-relative URLs ("//img...") need a scheme prefixed.
                if u.find("http") < 0:
                    u = "http:" + u
                # Fetch before opening the file so a failed fetch does not
                # leave an empty .jpg behind.
                img = get_request(u).content
                # "with" closes the file; the original also called
                # f.close() redundantly inside the block.
                with open(str(picNum) + ".jpg", "wb") as f:
                    f.write(img)
                picNum += 1

            print("========创建文本文件=========")
            with open(company + ".txt", "a", encoding="utf-8") as fp:
                fp.write("标题:" + title + '\n')
                fp.write("公司名称:" + company + '\n')
                fp.write("地址:" + address + '\n')
                fp.write("联系人:" + lsr + '\n')
                # The description <article> may be absent; the original
                # caught the resulting IndexError with a broad except.
                if desc1:
                    fp.write("内容:" + desc1[0])
                else:
                    print("--------【创建】文件错误---------")
                    fp.write("内容:")

            # Move the text file and downloaded images into the title folder.
            try:
                shutil.move(company + ".txt", title)
                for ll in range(1, picNum):
                    shutil.move(str(ll) + ".jpg", title)
            except Exception as ex:
                print("--------【移动】文件错误---------")
                print(ex)


def get_request(url, timeout=15):
    """GET *url* through the Abuyun HTTP tunnel proxy, retrying until a
    response is obtained.

    Parameters:
        url: absolute URL to fetch.
        timeout: per-attempt timeout in seconds. New, defaulted, so existing
            callers are unaffected; without it a stalled connection would
            hang the crawler forever.

    Returns:
        The requests.Response for the first successful attempt.
    """
    # Proxy settings are invariant across retries — build them once.
    proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
        "host": proxyHost,
        "port": proxyPort,
        "user": proxyUser,
        "pass": proxyPass,
    }
    proxies = {
        "http": proxyMeta,
        "https": proxyMeta,
    }
    proxy_auth = "Basic " + base64.b64encode(
        (proxyUser + ":" + proxyPass).encode('utf-8')).decode()

    # Retry in a loop instead of the original unbounded *recursion*, which
    # would eventually overflow the stack if the proxy stayed down.
    while True:
        try:
            ua = UserAgent(use_cache_server=False)  # 禁用服务器缓存 — fresh random UA per request
            headers = {
                "User-Agent": ua.random,
                "Proxy-Authorization": proxy_auth,
            }
            return requests.get(url, proxies=proxies, headers=headers,
                                timeout=timeout)
        except Exception as ex:
            print("-------------访问错误------------")
            print(ex)
            # Brief pause so a dead proxy is not hammered in a tight loop.
            time.sleep(1)


if __name__ == '__main__':
    # Script entry point: start crawling when run directly.
    get_url()
