import requests
from lxml import etree
import time
import uuid

# Module-level in-memory store of scraped articles. spiderHandle() appends one
# dict per article, and an article's "id" is simply its index in this list
# (see getObjById / news_map["id"] = len(news_list)).
news_list = []


class Spider_keji():
    """Scraper for tech-news listing pages on cnetnews.com.cn.

    Scraped article summaries are appended to the module-level ``news_list``;
    an article's ``id`` is its index in that list. Details and comments are
    attached lazily to the same dicts.
    """

    def __init__(self):
        # Listing-page URL template; %s is filled with the 1-based page number.
        self.url_root = "http://www.cnetnews.com.cn/list-7-1-0-0-%s-0.htm"

    def getObjById(self, id):
        """Return the news dict stored at index *id* in ``news_list``.

        *id* may be an int or a digit string. Returns None for a non-numeric
        string or an out-of-range index (the original raised IndexError on an
        out-of-range id and silently returned the wrong item for a negative
        int — both now return None).
        """
        if not isinstance(id, int):
            if not id.isdigit():
                return None
        idx = int(id)
        if not 0 <= idx < len(news_list):
            return None
        return news_list[idx]

    def run(self):
        """Crawl listing pages 1-9 and populate ``news_list``."""
        print("开始抓取新闻数据~~~~~~~~~~~~")
        for i in range(1, 10):
            self.spiderHandle(self.url_root % str(i))
            print("抓取第 %s 页新闻数据" % i)

    def spiderHandle(self, url):
        """Fetch one listing page and append one summary dict per article.

        Each dict carries time / url / img_url / title / abstract / id,
        where id is the dict's index in ``news_list``.
        """
        global news_list

        response = requests.get(url)
        # Pages are GBK-encoded. Decode once: the original
        # decode("gbk").encode("utf8").decode("utf8") round-trip was a no-op.
        html = response.content.decode("gbk")
        etree_html = etree.HTML(html)
        loop_selects = etree_html.xpath(u'//div[@class="news-loop clearfix"]')
        for loop_select in loop_selects:
            # Locals renamed so the ``time`` module and the ``url`` parameter
            # are no longer shadowed.
            pub_time = loop_select.xpath('div[@class="news-time"]')[0].text
            news_url = loop_select.xpath('div[@class="news-img"]/a')[0].attrib["href"]
            img_url = loop_select.xpath('div[@class="news-img"]/a/img')[0].attrib["src"]

            news_select = loop_select.xpath('div[@class="news-cont"]')[0]
            title = news_select.xpath("h2/a")[0].text
            abstract = news_select.xpath("p[@class='abstract']")[0].text

            news_map = {
                "time": pub_time,
                "url": news_url,
                "img_url": img_url,
                "title": title,
                "abstract": abstract,
                # id doubles as the index of this entry in news_list.
                "id": len(news_list),
            }
            news_list.append(news_map)
            # self.getNewsDetail(news_map["id"])

    def getNewsDetail(self, id):
        """Fetch, cache, and return the article body for news *id*.

        Returns the detail dict (context / source / title / time), the cached
        copy on repeat calls, or None when *id* is unknown or the entry has
        no URL.
        """
        newsObj = self.getObjById(id)
        if not newsObj:
            return None
        detail_old = newsObj.get("detail", None)
        if detail_old:
            # Already fetched — serve the cached copy without a new request.
            return detail_old

        url = newsObj.get("url", None)
        if not url:
            return None
        res = requests.get(url)
        html = res.content.decode("gbk")
        etree_html = etree.HTML(html)
        quocn = etree_html.xpath('//div[@class="qu_ocn"]')
        context = etree.tostring(quocn[0], method='html', encoding="utf8").decode("utf8")
        source = etree_html.xpath('//div[@class="qu_zuo"]/p[1]')[0].text
        detail = {
            "context": context,
            "source": source,
            "title": newsObj.get("title", None),
            "time": newsObj.get("time", None),
        }
        newsObj["detail"] = detail

        # Seed the article with placeholder comments.
        # BUG FIX: the original called self.setNewsComment, which is not
        # defined anywhere; the defined method is setComment.
        for i in range(1, 100):
            self.setComment(id, None, "人造评论====%s" % i)
        return detail

    def getNewsComment(self, id, start, end):
        """Return the slice [start:end] of the comment list for news *id*,
        or None if *id* is unknown."""
        newsObj = self.getObjById(id)
        if not newsObj:
            return None

        comments = newsObj.get("comment", [])
        return comments[start:end]

    def setComment(self, id, name, context):
        """Prepend a comment to the comment list of news *id* and return it.

        The comment dict carries name (defaults to the anonymous label when
        falsy), context, a formatted timestamp, and a uuid1-based id.
        NOTE(review): an unknown *id* makes getObjById return None and the
        attribute access below raise AttributeError — unchanged from the
        original behaviour.
        """
        newsObj = self.getObjById(id)
        comments = newsObj.get("comment", None)
        if not comments:
            newsObj["comment"] = comments = []

        if not name:
            name = "匿名用户"

        # Local renamed so the builtin-shadowing parameter ``id`` is not
        # clobbered mid-method.
        comment_id = str(uuid.uuid1())

        comment = {
            "name": name,
            "context": context,
            "time": time.strftime('%Y-%m-%d %H:%M:%S'),
            "id": comment_id,
        }
        comments.insert(0, comment)
        return comment

#sp = Spider_keji()
