# coding:utf-8
__author__ = 'chenghao'
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from gevent import monkey

monkey.patch_all()
from gevent.pool import Pool
import logging
import os
import urllib
import urllib2
from bottle import Bottle, request
from bs4 import BeautifulSoup
import conf


news_app = Bottle()
pool = Pool(200)


@news_app.get("/")
def get_param():
	pn = request.query.getunicode("pn", 1)
	q = request.query.getunicode("q", "")
	if q:
		results = req_360_so(pn, q)
		return {"code": 0, "results": results}
	else:
		return {"code": -1, "msg": "请输入搜索关键字"}


def req_360_so(pn, q):
	headers = {
		"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
		"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
		"Cache-Control": "max-age=0",
		"Connection": "keep-alive",
		"Host": "news.so.com",
		"Upgrade-Insecure-Requests": 1,
		"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
	}
	url_val = "tn=news&rank=rank&j=0&pn=" + str(pn) + "&q=" + q
	req = urllib2.Request("http://news.so.com/ns?" + url_val, headers=headers)
	res = urllib2.urlopen(req, timeout=30).read()
	bs = BeautifulSoup(res, "lxml")
	news = bs.select("#news .res-list")
	jobs = pool.map(analyze_news, news)

	return jobs


def analyze_news(new):
	try:
		source_url = new.select("h3 a")[0]["href"]
		p_img = new.select(".pimg")
		cover_img = ""
		if p_img:
			cover_img = p_img[0].select("img")[0]["src"]
		source_name = new.find(class_="sitename").string
		post_time = new.find(class_="posttime")["title"]
		intro = unicode(new.find(class_="content"))
		source_name_pinyin = conf.pinyin(source_name)
		results = get_article_content(source_name_pinyin, source_url)
		title = results[0]
		content = results[1]

		res = {"source_url": source_url, "cover_img": cover_img, "source_name": source_name, "post_time": post_time,
			   "intro": intro, "title": title, "content": content}

		return res
	except Exception, e:
		logging.error("解析新闻列表失败: " + str(e), exc_info=True)
		return {}

article_content_headers = {
	"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
	"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
	"Cache-Control": "max-age=0",
	"Connection": "keep-alive",
	"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
}


def get_article_content(source_name_pinyin, source_url):
	try:
		r = conf.c[source_name_pinyin]
		title_re = r["title_re"]
		content_re = r["content_re"]

		req = urllib2.Request(source_url, headers=article_content_headers)
		res = urllib2.urlopen(req, timeout=30).read()
		bs = BeautifulSoup(res, "lxml")

		title = bs.select(title_re)[0].get_text()
		content = unicode(bs.select(content_re)[0])

		replace_1 = r.get("replace_1")
		replace_2 = r.get("replace_2")
		if replace_1:
			if replace_2 == "./":
				res = os.path.split(source_url)
				content = content.replace(replace_1, res[0] + "/")
			if replace_2 == "./2":
				res = os.path.split(source_url)
				content = content.replace(replace_1, 'src="' + res[0] + "/")

	except Exception, e:
		logging.error("获取文章内容失败: " + str(e), exc_info=True)
		title = ""
		content = ""

	return title, content