import pickle
from urldb import UrlDB
import time
import urllib.parse as urlparse


class UrlPool:
    """URL pool for a crawler to manage URLs, grouped by host.

    URLs waiting to be downloaded live in ``self.waiting`` grouped by host;
    URLs handed out via :meth:`pop` are tracked in ``self.pending`` until the
    crawler reports their outcome through :meth:`set_status`. Permanent
    success/failure is recorded in ``self.db`` (a ``UrlDB``).
    """

    def __init__(self, pool_name):
        self.name = pool_name
        self.db = UrlDB(pool_name)  # permanent record of downloaded/failed URLs

        self.waiting = {}  # {host: set(urls)} URLs waiting to be downloaded, grouped by host
        self.pending = {}  # {url: pended_time} URLs popped (being downloaded) whose status is not yet reported
        self.failure = {}  # {url: times} per-URL transient-failure counter
        self.failure_threshold = 3   # give up on a URL after this many failures
        self.pending_threshold = 10  # seconds a popped URL may stay pending before it is re-queued
        self.waiting_count = 0       # total number of URLs across self.waiting
        self.max_hosts = ['', 0]     # [host, url_count] host currently holding the most waiting URLs
        self.hub_pool = {}           # {hub_url: last_query_time} hub (listing) pages
        self.hub_refresh_span = 0    # minimum seconds between two fetches of the same hub page
        self.load_cache()

    def __del__(self):
        # Best-effort persistence of unfinished URLs on object teardown.
        self.dump_cache()

    def load_cache(self):
        """Restore ``self.waiting`` from ``<name>.pkl`` left by a previous run.

        Missing or unreadable cache files are ignored (fresh start).

        NOTE(review): ``pickle.load`` can execute arbitrary code if the file
        is attacker-controlled — only load cache files this process wrote.
        """
        path = self.name + '.pkl'
        try:
            with open(path, 'rb') as f:
                self.waiting = pickle.load(f)
        except (OSError, pickle.UnpicklingError, EOFError, AttributeError):
            return  # no usable cache: keep the empty pool
        # Bug fix: keep the counter in sync with the restored pool so that
        # size()/empty() are correct right after a reload.
        self.waiting_count = sum(len(urls) for urls in self.waiting.values())

    def dump_cache(self):
        """Persist ``self.waiting`` to ``<name>.pkl`` so URLs survive restarts."""
        path = self.name + '.pkl'
        try:
            with open(path, 'wb') as f:
                pickle.dump(self.waiting, f)
        except (OSError, pickle.PicklingError):
            pass  # best effort: losing the cache only costs re-discovered URLs

    def set_hubs(self, urls, hub_refresh_span):
        """Register hub (listing) pages. Called by the crawler.

        :param urls: iterable of hub-page URLs (e.g. news listing pages)
        :param hub_refresh_span: seconds between two fetches of the same hub
        """
        self.hub_refresh_span = hub_refresh_span
        self.hub_pool = {url: 0 for url in urls}  # 0 == never fetched yet

    def set_status(self, url, status_code):
        """Report the download result of a URL previously returned by :meth:`pop`.

        200 marks the URL permanently done, 404 permanently failed. Any other
        status counts as a transient failure: the URL is re-queued until it
        has failed more than ``failure_threshold`` times.
        """
        self.pending.pop(url, None)  # download finished (or abandoned); no longer pending

        if status_code == 200:
            self.db.set_success(url)
            return
        if status_code == 404:
            self.db.set_failure(url)
            return
        # Transient failure: retry a few times to tolerate flaky errors.
        self.failure[url] = self.failure.get(url, 0) + 1
        if self.failure[url] > self.failure_threshold:
            self.db.set_failure(url)  # too many failures: record as permanently failed
            self.failure.pop(url)
        else:
            self.add(url)  # re-queue for another attempt

    def push_to_pool(self, url):
        """Put one URL into ``self.waiting`` under its host.

        :return: True if the URL is (now) in the pool, False for a malformed URL.
        """
        host = urlparse.urlparse(url).netloc
        if not host or '.' not in host:  # reject URLs without a plausible host
            print('try to push_to_pool with bad url:', url, ', len of url:', len(url))
            return False
        if host in self.waiting:
            if url in self.waiting[host]:
                return True  # already queued; do not double-count
            self.waiting[host].add(url)
            # Track which host currently holds the most waiting URLs.
            if len(self.waiting[host]) > self.max_hosts[1]:
                self.max_hosts[1] = len(self.waiting[host])
                self.max_hosts[0] = host
        else:
            self.waiting[host] = {url}  # first URL seen for this host
        self.waiting_count += 1
        return True

    def add(self, url, always=False):
        """Add a single URL to the pool unless it is busy or already done.

        :param always: force the URL into the waiting pool, skipping all checks.
        """
        if always:
            return self.push_to_pool(url)
        pended_time = self.pending.get(url, 0)
        if time.time() - pended_time < self.pending_threshold:
            # Still within the download window of a previous pop(); skip it.
            print('being downloading:', url)
            return
        if self.db.has(url):
            # Already recorded (success or permanent failure); discard.
            return
        if pended_time:
            self.pending.pop(url)  # pending entry expired; re-queue the URL
        return self.push_to_pool(url)

    def addmany(self, urls, always=False):
        """Add many URLs via :meth:`add`; tolerates a single str by mistake."""
        if isinstance(urls, str):
            print('urls is a str !!!!', urls)
            self.add(urls, always)
        else:
            for url in urls:
                self.add(url, always)

    def pop(self, count, hub_percent=50):
        """Take up to ``count`` URLs out of the pool. Called by the crawler.

        Hub pages whose refresh interval has elapsed come first (at most
        ``hub_percent`` percent of ``count``), then ordinary URLs — at most
        one per host per call, which spreads load across hosts.

        :return: dict mapping url -> 1 for hub URLs, url -> 0 for ordinary URLs.
        """
        url_attr_url = 0
        url_attr_hub = 1

        # 1. Hubs first, so fresh listing pages are re-crawled promptly.
        hubs = {}
        hub_count = count * hub_percent // 100
        for hub in self.hub_pool:
            span = time.time() - self.hub_pool[hub]
            if span < self.hub_refresh_span:
                continue  # fetched too recently; not due for a refresh
            hubs[hub] = url_attr_hub  # 1 means hub-url
            self.hub_pool[hub] = time.time()
            if len(hubs) >= hub_count:
                break

        # 2. Ordinary URLs fill the remainder, one per host.
        left_count = count - len(hubs)
        urls = {}
        for host in self.waiting:
            if not self.waiting[host]:
                continue  # this host's queue is currently empty
            url = self.waiting[host].pop()
            urls[url] = url_attr_url
            self.pending[url] = time.time()  # remember when it was handed out
            if self.max_hosts[0] == host:
                self.max_hosts[1] -= 1  # keep the max-host counter roughly in sync
            if len(urls) >= left_count:
                break
        self.waiting_count -= len(urls)  # hubs are not part of self.waiting
        urls.update(hubs)
        return urls

    def size(self):
        """Number of URLs currently waiting in the pool."""
        return self.waiting_count

    def empty(self):
        """True when no URLs are waiting."""
        return self.waiting_count == 0
