import configparser
import time
import os
import requests
import re
import json
from lxml import etree
from multiprocessing import Pool
import logging
import redis

# Log everything to proxies.log; DEBUG level captures all worker output.
logging.basicConfig(filename='proxies.log',
                    format='%(asctime)s,%(lineno)d,%(levelname)s,%(message)s',
                    datefmt='%Y%m%d%H%M%S',
                    level=logging.DEBUG)
log = logging.getLogger('proxies')

# NOTE(review): port 63790 is non-standard (redis default is 6379) —
# confirm it matches the local redis deployment.
pool = redis.ConnectionPool(host='localhost', port=63790, db=0)
r = redis.StrictRedis(connection_pool=pool)

# Redis set of proxies that passed verification (JSON-encoded dicts)
AVAILABLE = 'AVAILABLE'
# Pub/sub channel of proxies waiting to be verified
PROXIES_VERIFY = 'PROXIES:VERIFY'
# Pub/sub channel of website scraping jobs
WEBSITE = 'WEBSITE'
# Redis set of User-Agent header strings
HEADERS = 'HEADERS'


def compound():
    """Periodically re-queue previously verified proxies for re-validation.

    Every 12 hours the AVAILABLE set is renamed aside (so verify() can
    rebuild it from scratch) and each stored proxy is republished on the
    PROXIES_VERIFY channel for a fresh liveness/anonymity check.
    """
    while True:
        log.info('anwe available proxy')
        old = AVAILABLE + ':old'
        # RENAME raises "no such key" when AVAILABLE is absent (e.g. on
        # first startup, before verify() has stored anything) — guard so
        # this worker does not die with an unhandled ResponseError.
        if r.exists(AVAILABLE):
            r.rename(AVAILABLE, old)
            for proxy in r.smembers(old):
                # members are already JSON-encoded bytes; republish as-is
                r.publish(PROXIES_VERIFY, proxy)
        log.info('trunc avilable')
        time.sleep(12 * 60 * 60)


def verify():
    """Consume proxies from the PROXIES_VERIFY channel, probe each one
    against httpbin.org/ip, classify it as anonymous (hidden=1) or
    transparent (hidden=0), and store working proxies in AVAILABLE.
    """
    p = r.pubsub()
    p.subscribe(PROXIES_VERIFY)

    for message in p.listen():
        if message['type'] != 'message':
            continue

        proxy = json.loads(message['data'])
        try:
            res = requests.get(
                "http://httpbin.org/ip", timeout=10,
                proxies={proxy['protocol']: proxy['ip'] + ":" + proxy['port']})
        except Exception:
            # dead/unreachable proxy — drop it silently (best effort)
            continue
        encoding = 'utf-8' if res.apparent_encoding is None else res.apparent_encoding
        html = res.content.decode(encoding)

        # If the proxy's own IP appears as the reported origin, the proxy
        # masks the client address (high anonymity).  A plain substring
        # test is used here: the previous re.search(proxy['ip'], html)
        # fed the raw IP to the regex engine, where the dots matched any
        # character and could false-positive.
        if proxy['ip'] in html:
            proxy['hidden'] = 1
        else:
            proxy['hidden'] = 0
        # (protocol, ip, port, update_time, err_count, hidden)
        log.info('verify {}'.format(proxy))
        r.sadd(AVAILABLE, json.dumps(proxy))


def _clean(cell):
    """Strip spaces and CR/LF from a scraped table cell."""
    return cell.replace(' ', '').replace('\r', '').replace('\n', '')


def handle_url():
    """Consume scraping jobs from the WEBSITE channel, fetch each listed
    URL, extract (protocol, ip, port) triples using the site's XPath
    rules, and publish every candidate proxy on PROXIES_VERIFY.
    """
    p = r.pubsub()
    p.subscribe(WEBSITE)
    for message in p.listen():
        if message['type'] != 'message':
            continue
        website = json.loads(message['data'])
        for url in website['urls']:
            ua = r.srandmember(HEADERS)
            # srandmember returns bytes; pass a str header value
            header = {"User-Agent": ua.decode() if isinstance(ua, bytes) else ua}
            try:
                response = requests.get(url, headers=header, timeout=5)
            except Exception as e:
                log.error('url {} is error {}'.format(url, e))
                continue
            # response.encoding can be None when the server sends no
            # charset — fall back to the sniffed encoding, then utf-8.
            encoding = response.encoding or response.apparent_encoding or 'utf-8'
            html = etree.HTML(response.content.decode(encoding))
            ips = [_clean(v) for v in html.xpath(website['ip'])]
            ports = [_clean(v) for v in html.xpath(website['port'])]
            if website['protocol'] == 'http' or website['protocol'] == 'https':
                # fixed protocol for the whole site
                protocols = [website['protocol']] * len(ips)
            else:
                protocols = [_clean(v) for v in html.xpath(website['protocol'])]
            # zip() tolerates ragged extractions where the old index loop
            # raised IndexError and killed the worker
            for ip, port, proto_cell in zip(ips, ports, protocols):
                # data5u protocol cells may contain "http,https"
                for protocol in proto_cell.lower().split(','):
                    if protocol == 'yes':
                        protocol = 'https'
                    elif protocol == 'no':
                        protocol = 'http'
                    # hand the candidate to the verifier pipeline
                    log.info("{}://{}:{}".format(protocol, ip, port))
                    proxy = {'protocol': protocol, 'ip': ip, 'port': port,
                             'website': website['website']}
                    r.publish(PROXIES_VERIFY, json.dumps(proxy))


def init_conf():
    """Reload ./proxies.ini every hour and fan the config out via redis.

    For each ``[website-<name>]`` section a scraping job is published on
    the WEBSITE channel; every value in ``[headers]`` is added to the
    HEADERS User-Agent pool.

    Recognised website keys:
      url-*          a single URL, or "template,start,stop" page range
      rule-protocol  'http'/'https', or an XPath for the protocol column
      rule-ip        XPath for the IP column
      rule-port      XPath for the port column
    """
    count = 1
    while True:
        log.info('init config {}'.format(count))
        count += 1
        conf = configparser.ConfigParser()
        conf.read('./proxies.ini')
        for section in conf.sections():
            if section.startswith('website'):
                # split only once so site names may themselves contain '-'
                website = {'urls': [], 'website': section.split('-', 1)[1]}
                for key, value in conf.items(section):
                    if key.startswith('url-'):
                        parts = value.split(',')
                        if len(parts) == 1:
                            website['urls'].append(parts[0])
                        elif len(parts) == 3:
                            # "template,start,stop": one URL per page number
                            website['urls'].extend(
                                parts[0].format(page)
                                for page in range(int(parts[1]), int(parts[2])))
                    elif key == 'rule-protocol':
                        website['protocol'] = value
                    elif key == 'rule-ip':
                        website['ip'] = value
                    elif key == 'rule-port':
                        website['port'] = value
                r.publish(WEBSITE, json.dumps(website))
            elif section == 'headers':
                # every value under [headers] is a User-Agent string
                for key, value in conf.items(section):
                    r.sadd(HEADERS, value)
        time.sleep(60 * 60)


if __name__ == '__main__':
    # One worker process per pipeline stage: config loader, re-queuer,
    # scraper, verifier.  Named `workers` so it does not shadow/rebind the
    # module-level redis connection `pool`.
    workers = Pool(4)
    workers.apply_async(func=init_conf, args=())
    workers.apply_async(func=compound, args=())

    workers.apply_async(func=handle_url, args=())
    workers.apply_async(func=verify, args=())

    # All workers loop forever, so join() blocks until they are killed.
    workers.close()
    workers.join()
