import datetime
import json
import random
import re
import ssl
import urllib
import urllib.request

import chardet
import requests

# URL of the IP proxy-pool service; it returns JSON whose "data" field
# is the list of usable proxies (see get_ips_proxy below).
proxy_ips_url = "http://ips.bigbiy.com/?giao=xiaoagiao"

# Silence urllib3's InsecureRequestWarning: the fetchers below deliberately
# use verify=False / unverified TLS against scraped sites.
requests.packages.urllib3.disable_warnings()

# NOTE(review): mid-file import (used by get_html_text_by_urllib); should
# live at the top of the file with the other imports.
import ssl


# 获取ip代理池
# Fetch the current proxy list from the proxy-pool service.
def get_ips_proxy():
    """Return the list of proxies from the pool service's "data" field.

    Returns an empty list when the request fails, the response is not
    valid JSON, or the "data" field is missing/None — so callers can
    always safely take ``len()`` of / iterate the result.
    """
    try:
        # timeout: without it a dead proxy service hangs the caller forever
        res = requests.get(proxy_ips_url, timeout=10)
        # `or []` guards against a response without "data" (get() -> None),
        # which would otherwise blow up callers doing len(ips_list).
        ips_list = json.loads(res.text).get("data") or []
    except Exception as e:
        print("获取ip代理池错误")
        print(e)
        ips_list = []
    return ips_list


# 使用urllib获取html
# Fetch a URL with urllib and return its decoded HTML text.
def get_html_text_by_urllib(url):
    """Fetch *url* via urllib, optionally through a random pool proxy.

    Returns the decoded HTML text, or '' when decoding fails.  TLS
    certificate verification is intentionally disabled, matching the
    ``verify=False`` behavior of the requests-based fetcher below.
    """
    headers_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/56.0.2924.76 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    ]
    headers = {
        # rotate the User-Agent to look less like a bot
        "User-Agent": random.choice(headers_list),
    }

    # Pick a random proxy from the pool; 0 means "no proxy available".
    ips_list = get_ips_proxy()
    one_ip_port = random.choice(ips_list) if ips_list else 0

    if one_ip_port:
        # install_opener makes the proxy global for subsequent urlopen calls
        proxy_support = urllib.request.ProxyHandler({'http': one_ip_port})
        opener = urllib.request.build_opener(proxy_support)
        urllib.request.install_opener(opener)

    req = urllib.request.Request(url=url, headers=headers)

    # BUG FIX: the old ssl.SSLContext(ssl.PROTOCOL_TLSv1) spoke only
    # TLS 1.0, which modern servers reject (and the constant is
    # deprecated).  Use a modern default context but keep verification
    # off, preserving the original accept-any-certificate behavior.
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE

    # `with` guarantees the connection closes even if read() raises;
    # timeout keeps a stalled server from hanging the caller forever.
    with urllib.request.urlopen(req, context=context, timeout=30) as f:
        response = f.read()

    # Sniff the encoding — scraped pages rarely declare it reliably.
    code_style = chardet.detect(response).get("encoding")
    try:
        html_text = response.decode(code_style, "ignore")
    except Exception:
        # chardet can return None or a bogus codec name (LookupError/TypeError)
        print(datetime.datetime.now())
        print("encoding is error")
        return ''

    return html_text


# 根据url获取html详情
# Fetch a URL with requests and return its decoded HTML text.
def get_html_text(url):
    """Fetch *url* via requests, optionally through a random pool proxy.

    Returns the decoded HTML text, or the empty string when the request
    or the decoding fails.  TLS verification is disabled (verify=False),
    consistent with the urllib-based fetcher above.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    }

    # Pick a random proxy from the pool; 0 means "no proxy available".
    ips_list = get_ips_proxy()
    if len(ips_list):
        one_ip_port = random.choice(ips_list)
    else:
        one_ip_port = 0

    proxy = {
        'http': one_ip_port
    }

    print(proxy)

    try:
        # BUG FIX: added a timeout — without it a dead proxy or stalled
        # server hangs this call (and the whole scrape loop) forever.
        if not one_ip_port:
            res = requests.get(url, headers=headers, verify=False, timeout=30)
        else:
            res = requests.get(url, headers=headers, proxies=proxy, verify=False, timeout=30)
    except Exception as e:
        print(e)
        print("一条记录不能解析%s" % (datetime.datetime.now()))
        return ""

    html_bytes = res.content
    # Sniff the encoding — scraped pages rarely declare it reliably.
    code_style = chardet.detect(html_bytes).get("encoding")
    try:
        html_text = html_bytes.decode(code_style, "ignore")
    except Exception:
        # chardet can return None or a bogus codec name (LookupError/TypeError)
        print(datetime.datetime.now())
        print("encoding is error")
        return ''
    return html_text


# 根据商品url获取商品详情图片
def get_goods_detail_by_itemurl_sellerid(itemurl):
    # html_text = get_html_text(itemurl)
    html_text = get_html_text_by_urllib(itemurl)

    all_jpgs_list = re.findall("\w+?\.\w+?\.com.+?\.jpg", html_text)

    # 遍历拼接url
    new_jpgs_list = []
    for one_jpg in all_jpgs_list:
        new_jpg = "https://" + one_jpg
        if "," not in new_jpg and "\\" not in new_jpg and not bool(re.search(r'\d+x\d+', new_jpg)) and \
                "gw.alicdn.com" not in new_jpg:
            new_jpgs_list.append(new_jpg)

    new_jpgs_list = list(set(new_jpgs_list))

    return new_jpgs_list
