# coding:utf-8
__author__ = 'chenghao'

from gevent import monkey

monkey.patch_all()
from gevent.pool import Pool
from bottle import Bottle, request
import json
import urllib
import urllib2
from lxml import html
import lxml.etree as etree

# Bottle sub-application exposing the Toutiao news search endpoint.
toutiao_news_app = Bottle()
# Shared greenlet pool bounding concurrent scraping work to 200 greenlets.
pool = Pool(200)

# Headers sent with every request to toutiao.com so the scraper looks like a
# regular Chrome browser.
toutiao_headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Host": "toutiao.com",
    # Must be a string: Python 2's httplib joins header values with
    # str.join, so an int value raises TypeError when the request is sent.
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
}


@toutiao_news_app.get("/")
def index():
    """Search endpoint: GET /?query=<keyword>&page=<1-based page number>.

    Returns ``{"code": 0, "results": [...]}`` on success, or
    ``{"code": -1, "msg": ...}`` when no search keyword was supplied.
    """
    query = request.query.getunicode("query", "")
    page = request.query.getunicode("page", 1)

    if query:
        # Toutiao's search API pages by a 0-based item offset, 10 items per
        # page, so page 1 must map to offset 0.  The original multiplied the
        # raw page number (page 1 -> offset 10) and silently skipped the
        # first 10 results.
        offset = (int(page) - 1) * 10
        results = req_toutiao(query, offset)
        return {"code": 0, "results": results}
    else:
        return {"code": -1, "msg": "请输入搜索关键字"}


def req_toutiao(query, page):
    """Query Toutiao's search API and return the analyzed result list.

    :param query: search keyword (unicode).
    :param page: 0-based item offset passed straight to the API.
    :returns: list of dicts produced by ``analyze_news``; empty list when
        the API does not report success or returns no ``data``.
    """
    url = "http://toutiao.com/search_content/?"
    # Percent-encode the keyword: interpolating a raw unicode keyword into
    # the URL breaks urllib2 for any non-ASCII query.
    keyword = urllib.quote(query.encode("utf-8"))
    url_param = "format=json&count=10&keyword=%s&offset=%s" % (keyword, page)
    req = urllib2.Request(url + url_param, headers=toutiao_headers)
    res = urllib2.urlopen(req, timeout=60).read()
    json_s = json.loads(res)
    # .get() instead of ["message"]/["data"]: a malformed or partial API
    # response yields [] rather than raising KeyError.
    if json_s.get("message") == "success":
        data = json_s.get("data") or []
        # Fan the per-article scraping out across the greenlet pool.
        return pool.map(analyze_news, data)
    return []


def analyze_news(new):
    """Build the response dict for one search-result entry.

    ``new`` is a single element of the API's ``data`` list.  The article
    body is downloaded via the shared greenlet pool; this blocks the
    current greenlet until the fetch completes.
    """
    article_url = new.get("article_url", "")
    # Run the download on the shared pool, then wait for its result.
    content = pool.spawn(get_article_content, article_url).get()

    item = {"article_url": article_url, "content": content}
    # Remaining fields are copied straight from the API entry, defaulting
    # to "" when absent (matching the API's loose schema).
    for field in ("title", "datetime", "image_list", "source", "abstract"):
        item[field] = new.get(field, "")
    return item


def get_article_content(source_url):
    """Download a Toutiao article page and return its body as HTML.

    :param source_url: article URL taken from the search results.
    :returns: the ``article-content`` div serialized as unicode HTML, or
        ``""`` for non-Toutiao URLs and for pages missing that div.
    """
    # Guard clause: only scrape pages we know the layout of.
    if "toutiao.com" not in source_url:
        return ""
    req = urllib2.Request(source_url, headers=toutiao_headers)
    res = urllib2.urlopen(req, timeout=60).read()
    page = html.fromstring(res.decode('utf-8', 'ignore'))
    nodes = page.xpath(u'//*[@id="pagelet-article"]/div[@class="article-content"]')
    # The original indexed [0] unconditionally and raised IndexError when
    # the site's layout changed; treat a missing div as "no content".
    if not nodes:
        return ""
    return etree.tounicode(nodes[0])
