#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/5/31  15:29
# @Author: 余浪人
# @Email:yulangren520@Gmail.com

import requests, re, time, random
from fake_useragent import UserAgent
from queue import Queue
from apps.lib.public import get_hash, batch_add
from apps.models.collector_model import Collect_Proxy_Model, Collect_History_Model, db
from apps.spiders.setting import Config

# Shared User-Agent generator; .random yields a fresh UA string per request.
userAgent = UserAgent()
queue = Queue(5000)  # maximum number of queued urls


def IpProxy(app_key, xy=1):
    """Fetch one proxy endpoint from the wandoudl API.

    :param app_key: API key for the proxy service.
    :param xy: proxy type flag forwarded to the API (default 1).
    :return: a dict usable as the ``proxies`` argument of ``requests`` on
             success, or the API's error-message string (e.g. no quota
             left) on failure.
    """
    url = f'http://api.wandoudl.com/api/ip?app_key={app_key}&pack=205328&num=1&xy={xy}&type=2&lb=\r\n&port=4mr=2&'
    response = requests.get(url, timeout=5)
    try:
        data = response.json().get('data')[0]
        if data and response.status_code == 200:
            endpoint = f"{data.get('ip')}:{data.get('port')}"
            # BUG FIX: requests keys the proxies mapping by scheme name
            # ('http'/'https'); the original 'http://'-style keys were
            # silently ignored, so the proxy was never actually used.
            # Values also need an explicit scheme prefix.
            return {"http": f"http://{endpoint}", "https": f"http://{endpoint}"}
    except (TypeError, IndexError, AttributeError, ValueError):
        # Body wasn't JSON, or 'data' was missing/empty (e.g. quota
        # exhausted) -> surface the API's message to the caller.
        return response.json().get('msg')


def download_get(url, agr_id: int):
    """Download ``url``, retrying (forever) until a 200 response arrives.

    Routes the request through a proxy when the DB-configured proxy row
    enables one; otherwise fetches directly.

    :param url: page to fetch (also sent as the ``Referer`` header).
    :param agr_id: proxy type flag forwarded to :func:`IpProxy`.
    :return: the successful :class:`requests.Response`.
    """
    headers = {
        "User-Agent": userAgent.random,
        "Referer": url,
    }
    while True:
        try:
            proxy_obj = Collect_Proxy_Model.query.filter().first()
            # proxy_obj may be None when the table is empty -> no proxy.
            if proxy_obj and proxy_obj.is_proxy and proxy_obj.app_key:
                try:
                    proxies = IpProxy(proxy_obj.app_key, agr_id)
                    # IpProxy returns an error-message *string* when no
                    # proxy is available; only a dict is usable here.
                    if not isinstance(proxies, dict):
                        raise ValueError(proxies)
                    response = requests.get(url=url, headers=headers,
                                            proxies=proxies, timeout=5)
                except Exception:
                    # Exception (not bare except) so Ctrl-C still works.
                    print("代理IP异常!")
                    time.sleep(random.randint(1, 5))  # random back-off
                    continue
            else:
                response = requests.get(url=url, headers=headers)
            if response.status_code == 200:
                return response
        except Exception:
            print("源码下载异常!")
            time.sleep(random.randint(1, 5))  # random back-off


def distinct_url_verify(url):
    """Check whether ``url`` has already been collected (Redis dedup).

    :param url: link to check.
    :return: "已采集!" when the url's hash is present in Redis, else None.
    """
    # Was a misplaced triple-quoted string after the first statement,
    # i.e. a no-op expression -- now a real docstring.
    redis = Config.Redis
    hash_str = get_hash(url)
    if redis.get(hash_str):
        return "已采集!"


def distinct_detail_write(url, title, superior):
    """Dedup-check ``url`` and record it when it is new.

    When the url's hash is unknown to Redis, write a history row to MySQL
    (if not already present) and set a 30-day Redis marker so later checks
    never have to touch MySQL.

    :param url: collected link.
    :param title: page title stored on the history row.
    :param superior: parent/source identifier stored on the history row.
    :return: "已采集!" when already collected, else None.
    """
    redis = Config.Redis
    hash_str = get_hash(url)
    if redis.get(hash_str):
        return "已采集!"
    # .first() is None replaces len(.all()) == 0 -- an existence check
    # should not load every matching row.
    exists = Collect_History_Model.query.filter(
        Collect_History_Model.identifying_code == hash_str).first()
    if exists is None:
        batch_add(Collect_History_Model, db, [
            {"title": title, "superior": superior,
             "identifying_code": hash_str}, ])
        # 30-day TTL (seconds); nx=True keeps an existing marker intact.
        redis.set(hash_str, f'{hash_str}', ex=(3600 * 24) * 30, nx=True)


def strReplace(string: str):
    """Split ``string`` on the '[%TL%]' template marker.

    :param string: rule string expected to contain exactly one '[%TL%]'.
    :return: ``(start, end)`` halves, or ``('', '')`` when the marker is
             absent or appears more than once.
    """
    try:
        data_s, data_e = string.split('[%TL%]')
    except ValueError:  # 0 or >1 markers -> 2-tuple unpack fails
        data_s, data_e = '', ''
    return data_s, data_e


def conversion_code(response):
    """Decode a response body, transcoding GBK pages served without charset.

    When the detected encoding is not UTF-8, ``response.text`` (decoded by
    requests' latin1 fallback) is re-encoded to bytes and decoded as GBK.

    :param response: a requests.Response-like object exposing
                     ``apparent_encoding`` and ``text``.
    :return: the decoded page text.
    """
    # apparent_encoding can be None when detection fails -> treat as utf-8.
    detected = (response.apparent_encoding or 'utf-8').lower()
    if detected == 'utf-8':
        return response.text
    try:
        return response.text.encode("latin1").decode("gbk")
    except (UnicodeEncodeError, UnicodeDecodeError):
        # Text isn't a latin1 round-trip of GBK bytes after all --
        # fall back to requests' own decoding instead of crashing.
        return response.text


if __name__ == '__main__':
    # Manual smoke test: fetch an IP-echo page through a freshly obtained
    # proxy. NOTE(review): IpProxy('') has no valid app_key, so it likely
    # returns the API's error-message string rather than a proxies dict --
    # confirm before relying on this check.
    x = requests.get('http://ip.chinaz.com/', proxies=IpProxy(''))
    print(x.text)
