# coding:utf-8
__author__ = "chenghao"

from gevent import monkey

# gevent monkey-patching must run before any socket-using module is imported.
monkey.patch_all()
from gevent.pool import Pool
from bottle import Bottle, request
import logging
import urllib
import urllib2
from bs4 import BeautifulSoup

# Bottle sub-application exposing the Sogou Weixin news search endpoint.
sogou_news_app = Bottle()
# Shared gevent pool: caps concurrent parse/download greenlets at 200.
pool = Pool(200)
# Headers sent when requesting search-result pages from weixin.sogou.com.
# NOTE(review): the hard-coded Cookie carries Sogou session state (SNUID,
# JSESSIONID, ...) — presumably it expires and must be refreshed when
# searches start failing; confirm against live responses.
sogou_headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
    "Connection": "keep-alive",
    "Cookie": "CXID=1FF99DDE12C736A38DEE633C77CE5D83; SUV=000D55450EC41CEE570B923182D84091; ssuid=5581580960; SUID=5C5FC40E4B6C860A56AC5DF600093ECA; ABTEST=7|1464180866|v1; weixinIndexVisited=1; SNUID=D3E176BDB3B681189CF620FDB34EDC83; JSESSIONID=aaaEhMT-VuPp4hxb1oItv; IPLOC=CN1100",
    "Host": "weixin.sogou.com",
    "Upgrade-Insecure-Requests": 1,
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36",
}
# Headers sent when fetching full article bodies from mp.weixin.qq.com.
weixin_headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Host": "mp.weixin.qq.com",
    "Referer": "weixin.sogou.com",
    "Upgrade-Insecure-Requests": 1,
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36",
}


@sogou_news_app.get("/")
def index():
    """Search endpoint: forward the ``query``/``page`` parameters to the
    Sogou Weixin scraper and return the parsed results as JSON.

    Responds with ``{"code": 0, "results": [...]}`` on success, or
    ``{"code": -1, "msg": ...}`` when no keyword was supplied.
    """
    keyword = request.query.getunicode("query", "")
    page_no = request.query.getunicode("page", 1)

    # Guard clause: reject requests without a search keyword up front.
    if not keyword:
        return {"code": -1, "msg": "请输入搜索关键字"}
    return {"code": 0, "results": req_sogou(keyword, page_no)}


def req_sogou(query, page):
    """Fetch one page of Sogou Weixin search results and parse every hit.

    :param query: search keyword (``unicode`` or byte string).
    :param page: 1-based result-page number (int or numeric string).
    :return: list of dicts produced by ``analyze_news``, one per result
        node (``{}`` entries for nodes that failed to parse).
    """
    base_url = "http://weixin.sogou.com/weixin?"
    # URL-encode the user-supplied keyword. The previous raw "%s"
    # interpolation produced malformed URLs for spaces/CJK characters and
    # let a crafted keyword inject extra query parameters.
    params = urllib.urlencode({
        "type": 2,
        "ie": "utf8",
        "_sug_": "y",
        "query": query.encode("utf-8") if isinstance(query, unicode) else query,
        "page": page,
    })
    req = urllib2.Request(base_url + params, headers=sogou_headers)
    res = urllib2.urlopen(req, timeout=60).read()
    bs = BeautifulSoup(res, "lxml")
    news = bs.select(".results .wx-rb")
    # Parse all result nodes concurrently on the shared gevent pool.
    return pool.map(analyze_news, news)


def analyze_news(new):
    """Extract one search-result node into a plain dict.

    :param new: BeautifulSoup tag for a single ``.wx-rb`` result block.
    :return: dict with keys ``source_url``, ``cover_img``, ``source_name``,
        ``intro``, ``title``, ``time``, ``content`` — or ``{}`` when the
        node cannot be parsed, so one bad entry never kills the whole page.
    """
    try:
        source_url = new.select(".img_box2 a")[0]["href"]
        cover_img = new.select(".img_box2 a img")[0]["src"]
        title = new.select(".txt-box h4 a")[0].get_text()
        source_name = new.find(id="weixin_account")["title"]
        # Keep the raw <p> markup of the summary, not just its text.
        intro = unicode(new.select(".txt-box p")[0])

        # Fetch the article body on the gevent pool; .get() blocks this
        # greenlet (not the process) until the download completes.
        time, content = pool.spawn(get_article_content, source_url).get()

        return {"source_url": source_url, "cover_img": cover_img,
                "source_name": source_name, "intro": intro,
                "title": title, "time": time, "content": content}
    # "as" syntax replaces the deprecated comma form (removed in Python 3,
    # valid since 2.6); broad catch is deliberate — best-effort per node.
    except Exception as e:
        logging.error("解析新闻列表失败: %s", e, exc_info=True)
        return {}


def get_article_content(source_url):
    """Download a WeChat article page and extract its publish time and body.

    :param source_url: absolute URL of the article on mp.weixin.qq.com.
    :return: ``(time, content)`` tuple; ``content`` is the raw HTML of the
        ``#js_content`` element. Both default to ``""`` on any failure.
    """
    time = ""
    content = ""
    try:
        req = urllib2.Request(source_url, headers=weixin_headers)
        res = urllib2.urlopen(req, timeout=60).read()
        bs = BeautifulSoup(res, "lxml")

        # find() returns None when the element is absent; guard explicitly
        # instead of letting the AttributeError fall through to the broad
        # except (which also used to discard the rest of the parse).
        post_date = bs.find(id="post-date")
        if post_date is not None:
            time = post_date.string
        content = unicode(bs.select("#js_content")[0])
    # Modern "as" syntax (comma form is removed in Python 3); any network
    # or parse failure degrades to empty strings rather than propagating.
    except Exception as e:
        logging.error("获取文章内容失败: %s", e, exc_info=True)

    return time, content