# coding=utf8
import redis
import requests
import time
from pymongo import MongoClient
from uritools import urisplit
from bs4 import BeautifulSoup

# Redis connection (default localhost:6379) — holds the URL dedup set and
# the crawl seed queue.
redis_model = redis.StrictRedis()

# MongoDB collections for scraped articles and crawl metadata.
client = MongoClient(host='192.168.253.129')
db = client['Baike']
content_coll = db['Resource']          # stored article pages
block_dict_coll = db['BlockDict']      # keyword substrings to block
polysemant_coll = db['Polysemant']     # disambiguation (polysemant) lists

# Redis keys: set of already-seen URLs, and the list used as the seed queue.
filter_key = 'Baike:PageSpider:UrlFilter'
pool_key = 'Baike:PageSpider:SeedPool'

allowed_domain = "baike.baidu.com"

# Load the URL-blocking keyword list from MongoDB once at startup.
block_rules = [rule['Keyword'] for rule in block_dict_coll.find()]


def str_to_time(date_str, format="%Y-%m-%d"):
    """Convert a date string to an integer Unix timestamp (local time).

    The first parameter was renamed from ``str`` — it shadowed the builtin.
    ``format`` is kept as-is for backward compatibility with keyword callers.

    Args:
        date_str: date text, e.g. "2017-05-01".
        format: ``time.strptime`` format of ``date_str``.

    Returns:
        Seconds since the epoch, interpreted in the local timezone.

    Raises:
        ValueError: if ``date_str`` does not match ``format``.
    """
    return int(time.mktime(time.strptime(date_str, format)))


def is_block(test_url, rules=None):
    """Return True when ``test_url`` contains any blocking keyword.

    Args:
        test_url: URL string to check.
        rules: optional iterable of keyword substrings; defaults to the
            module-level ``block_rules`` loaded from MongoDB, so existing
            single-argument callers behave exactly as before.
    """
    if rules is None:
        rules = block_rules
    return any(rule in test_url for rule in rules)


def filter_useless(tag):
    """BeautifulSoup ``find_all`` predicate: True for <a> tags worth crawling.

    Rejects non-anchor tags, empty/javascript/fragment-only hrefs, links
    matching a block keyword, and links pointing off the allowed domain.
    """
    if tag.name != 'a' or not tag.has_attr('href'):
        return False
    # BUG FIX: bs4 parses multi-valued attributes such as ``rel`` into a
    # list (e.g. ['nofollow']), so the original ``tag['rel'] != 'nofollow'``
    # comparison was always True and rejected every link carrying ANY rel
    # attribute. Skip only links explicitly marked rel="nofollow".
    if tag.has_attr('rel') and 'nofollow' in tag['rel']:
        return False
    href = tag['href']
    if href == '' or href.startswith('javascript') or href.startswith('#'):
        return False
    if is_block(href):
        return False
    url = urisplit(href)
    # Absolute links must stay on the allowed domain; relative ones pass.
    if url.gethost() is not None and url.gethost() != allowed_domain:
        return False
    return True


def get_page(resp_url):
    """Fetch one Baike page, enqueue its internal links, and store the article.

    Side effects: adds newly-seen URLs to the Redis filter set and seed
    queue; inserts a document into ``Resource`` (and possibly ``Polysemant``).

    Args:
        resp_url: absolute URL of the page to fetch.

    Returns:
        False on fetch failure, non-200 status, or a page without a
        'main-content' block; None after a successful save.
    """
    print(resp_url)
    try:
        r = requests.get(resp_url, allow_redirects=False, timeout=15)
        r.encoding = r.apparent_encoding
    except requests.RequestException:
        # Narrowed from a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return False
    # Reject redirects (3xx, since allow_redirects=False) and error statuses.
    if r.status_code > 200:
        return False
    print("开始分析...")

    soup = BeautifulSoup(r.text, 'lxml')

    # Harvest crawlable links and enqueue URLs not seen before.
    for link in soup.find_all(filter_useless):
        url_parsed = urisplit(link['href'])
        if url_parsed.gethost() is None:
            # Relative link: qualify it against the allowed domain.
            url = "http://" + allowed_domain + link['href']
        else:
            url = link['href']
        # Strip the fragment part, if any.
        if url.find('#') > 0:
            url = url[:url.find('#')]
        # sadd returns 1 only for URLs never seen before -> enqueue exactly once.
        if redis_model.sadd(filter_key, url) > 0:
            redis_model.lpush(pool_key, url)

    if soup.find(class_='main-content') is None:
        return False
    print("准备入库")
    # Check whether the page carries a polysemant (disambiguation) list.
    check_polysemant = False
    polysemant_div = soup.find(class_='polysemant-list polysemant-list-normal')
    if polysemant_div is not None:
        print("存在多义词")
        check_polysemant = True
        keyword = polysemant_div.find(class_='polysemantList-header-title').find('b').get_text()
        polysemant_item = polysemant_coll.find_one({'Keyword': keyword})
        if polysemant_item is None:
            # TODO: the <ul> is stored as raw HTML for now, not parsed.
            polysemant_id = polysemant_coll.insert_one(
                {
                    "Keyword": keyword,
                    "Content": str(polysemant_div.find('ul'))
                }
            ).inserted_id
        else:
            polysemant_id = polysemant_item['_id']

    # Build the MongoDB document for this article.
    save_item = dict()
    save_item['url'] = resp_url
    # TODO: consider stripping the parenthetical suffix from the title?
    save_item['title'] = soup.find('title').get_text().replace('_百度百科', '')
    save_item['content'] = str(soup.find(class_='main-content'))
    # NOTE(review): raises AttributeError if the page has no
    # 'j-modified-time' span — confirm every article page includes one.
    time_str = soup.find('span', class_='j-modified-time').get_text()
    save_item['update_time'] = str_to_time(time_str)
    if check_polysemant:
        save_item['Polysemant'] = polysemant_id
    content_coll.insert_one(save_item)
    print("分析完毕...")


def get_seeds_from_redis():
    """Drain up to 41 seed URLs from the Redis pool, blocking when empty.

    Returns early with a partial batch if the pool runs dry after at least
    one URL was collected; otherwise blocks until a seed appears.
    """
    batch = []
    while len(batch) <= 40:
        item = redis_model.lpop(pool_key)
        if item is None:
            # Pool drained: hand back what we have, or block for the next seed.
            if batch:
                return batch
            _, item = redis_model.blpop(pool_key)
        batch.append(item.decode('utf8'))
    return batch


if __name__ == '__main__':
    # Seed the pool with the root page so the queue is never empty on startup.
    redis_model.lpush(pool_key, "http://baike.baidu.com/")
    '''
    from threadpool import ThreadPool, makeRequests
    # 开启多线程
    pool = ThreadPool(30)
    while True:
        # 获取种子链接
        seeds = get_seeds_from_redis()
        requests = makeRequests(get_page, seeds)
        [pool.putRequest(req) for req in requests]
        pool.wait()
    '''
    # Single-threaded crawl loop: block until a seed is available, then fetch.
    while True:
        key, target_url = redis_model.blpop(pool_key)
        get_page(target_url.decode())
