#!/usr/bin/env python
# -*-coding: utf-8-*-
# AUTHOR ：Arthur
# DATE   ：2016-10-19
# INTRO  ：
# VERSION：0.1
import random
import re
from lxml import etree
from collections import defaultdict


class Rule(object):
    """Parsers that extract ``ip:port`` proxy strings from various
    upstream sources (Xici web pages, Daxiang API responses, squid
    ``cache_peer`` config files)."""

    @staticmethod
    def xici(page):
        """
        Parse a Xici proxy-list HTML page into ``ip:port`` strings.

        :param page: HTML text of the Xici proxy page
        :return: list of ``ip:port`` strings; empty (or partial) list on
                 parse failure instead of ``None``, so callers can always
                 iterate the result
        """
        proxies = []
        try:
            tree = etree.HTML(page)
            rows = tree.xpath('//table[@id="ip_list"]')[0]
            # The first row of the table is the header, so skip it.
            for row in rows[1:]:
                ip = row.xpath('td[2]/text()')[0]
                port = row.xpath('td[3]/text()')[0]
                proxies.append(ip + ':' + port)
        except Exception as e:
            # Best-effort scraping: report the problem and return
            # whatever was collected before the failure.
            print('分析西刺代理页面出错:', e)
        return proxies

    @staticmethod
    def daixiang(page):
        """
        Parse the Daxiang proxy API response into a proxy list.

        :param page: raw text response, one ``ip:port`` per line
        :return: list of stripped, non-empty ``ip:port`` strings
        """
        # splitlines() also handles \r\n; drop blank entries that a
        # trailing newline would otherwise turn into "" elements.
        return [line.strip() for line in page.splitlines() if line.strip()]

    @staticmethod
    def data_wash(ip_list):
        """
        Deduplicate proxies that share the same host.

        For each host that appears more than once, keep a single entry
        with a randomly chosen port among the duplicates.

        :param ip_list: iterable of ``ip:port`` strings
        :return: list of ``ip:port`` strings with unique hosts
        """
        ports_by_host = defaultdict(list)
        for entry in ip_list:
            parts = entry.strip().split(":")
            # Host is the first field, port the last (same semantics as
            # split(":")[0] / split(":")[-1]).
            ports_by_host[parts[0]].append(parts[-1])
        return ['{}:{}'.format(host, random.choice(ports))
                for host, ports in ports_by_host.items()]

    @staticmethod
    def peer_data(s):
        """
        Convert squid ``cache_peer`` configuration text into ``ip:port``.

        :param s: full text read from a cache_peer config file
        :return: list of ``ip:port`` strings; lines that do not match the
                 expected ``cache_peer`` format (comments, blanks, other
                 directives) are skipped instead of raising IndexError
        """
        # Compile once outside the loop; pattern kept identical to the
        # original (note the double space before "parent").
        pattern = re.compile(
            r'cache_peer (.*?)  parent (.*?) 0 proxy-only round-robin')
        proxies = []
        for line in s.strip().split("\n"):
            match = pattern.search(line)
            if match is None:
                # Tolerate non-peer lines rather than crashing on them.
                continue
            proxies.append(match.group(1) + ":" + match.group(2))
        return proxies