#https://kumnit.com/feed
"""
python3环境, 注意要处理html代码的混淆
"""

import re
import requests
from lxml import etree

class baseSpider:
    """Common base class for site spiders; concrete spiders subclass it."""

class kumnitSpider(baseSpider):
    """Spider for https://kumnit.com driven by the site's RSS feed."""

    media_id = 194
    # Spider name matches the class name (project convention).
    name = 'kumnitSpider'
    # Domains this spider is allowed to crawl.
    allowed_domains = ['kumnit.com']
    # (rss_url, channel_name) pairs; the channel label string is project data
    # and is kept verbatim ("未分类频道" = "uncategorized channel").
    urls_channel = [("https://kumnit.com/feed", "未分类频道")]

    def get_item_urls(self, response):
        """
        Collect the article URLs listed in the RSS feed.

        Side effect: caches each item's first inline image URL in
        ``self.thumbs`` so ``get_thumbs`` can reuse it later.

        Args:
            response (requests.Response): response for the RSS feed URL
        Returns:
            list: article URLs, one per feed <item>
        """
        res = etree.HTML(response.content)
        # NOTE(review): parsing RSS with the HTML parser appears to expose the
        # <content:encoded> payload as an <encoded> element (prefix dropped).
        # Thumbnail = first <img> of the first image-bearing <p> per item.
        self.thumbs = [img.get('src')
                       for img in res.xpath('//encoded/p[img][1]/img[1]')]
        # With the HTML parser <link> is treated as a void tag, so the URL
        # text lands in the element's .tail rather than .text.
        return [link.tail.strip('\n\t') for link in res.xpath('.//item/link')]

    def get_thumbs(self, response):
        """
        Thumbnail URLs aligned one-to-one with the article URL list.

        Args:
            response (requests.Response): feed response (unused; thumbs were
                cached by ``get_item_urls``)
        Returns:
            list: thumbnail URLs with a ``thumb=1`` query parameter appended
        """
        return [u + "&thumb=1" if "?" in u else u + "?thumb=1"
                for u in self.thumbs]

    def get_thumb(self, response):
        """
        Fallback thumbnail: the article page's twitter:image meta tag.

        Args:
            response (requests.Response): article page response
        Returns:
            str: image URL from <meta name="twitter:image" content="...">
        """
        res = etree.HTML(response.content)
        # Raises IndexError if the page carries no twitter:image meta tag.
        return res.xpath('.//meta[@name="twitter:image"]')[0].get('content')

    def get_title(self, response):
        """
        Extract the article title from an article page.

        Args:
            response (requests.Response): article page response
        Returns:
            str: text of the page's first <h1>
        """
        tree = etree.HTML(response.text)
        return tree.xpath('.//h1')[0].text

    def get_author(self, response):
        """Author name; by convention it equals the source site name."""
        return "kumnit"

    def get_release_time(self, response):
        """
        Extract the article publication time from an article page.

        Args:
            response (requests.Response): article page response
        Returns:
            str: value of the first <time> element's datetime attribute
        """
        tree = etree.HTML(response.text)
        return tree.xpath('//time')[0].get('datetime')

    def get_content(self, response):
        """
        Extract the article body (text + images) from an article page.

        Requirements: keep text and images, drop ads/scripts, and strip every
        <img> attribute except src.

        Args:
            response (requests.Response): article page response
        Returns:
            str: concatenated, cleaned HTML of the body paragraphs
        """
        tree = etree.HTML(response.text)
        paragraphs = tree.xpath('.//*[@class="tdb-block-inner td-fix-index"]/p')
        content = "".join(etree.tounicode(p) for p in paragraphs)
        # Fragments to delete so only <img src="..."> survives; raw strings
        # avoid invalid-escape warnings in regex patterns.
        re_words = [
            r'<a href=.*?>',
            r'srcset=".*?"',
            r'sm-src=".*?"',
            r'big-src=".*?"',
            r'class=".*?"',
            r'alt=".*?"',
            r'width=".*?"',
            r'height=".*?"',
            r'sizes=".*?"',
            r'<script>.*?</script>',
            r'<div>.*?</div>',
        ]
        # Two-phase (collect on the original text, then replace) so patterns
        # never match against partially-cleaned content.
        useless_list = []
        for pattern in re_words:
            useless_list.extend(re.findall(pattern, content, re.S))
        for fragment in useless_list:
            content = content.replace(fragment, '')
        return content

    def get_inner_images(self, response, content=''):
        """
        Collect the src of every <img> inside the cleaned article content.

        Args:
            response (requests.Response): article page response (unused)
            content (str): HTML fragment returned by ``get_content``
        Returns:
            list: image URLs found in *content*
        """
        # Guard: etree.HTML('') returns None, which has no .xpath.
        if not content:
            return []
        tree = etree.HTML(content)
        # BUG FIX: the original XPath './/img/src' selected a (nonexistent)
        # child element and always returned []; '@src' selects the attribute.
        return tree.xpath('.//img/@src')

# Browser-like request headers; the User-Agent mimics a desktop Chrome build.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/41.0.2227.1 Safari/537.36'
    ),
    'Cache-Control': 'max-age=0',
}


if __name__ == "__main__":
    spider = kumnitSpider()
    response = requests.get(spider.urls_channel[0][0], headers=headers)#打开网页
    item_urls = spider.get_item_urls(response)#获取文章链接
    # print(item_urls)
    thumbs = spider.get_thumbs(response)#获取图片链接
    for index,url in enumerate(item_urls):#遍历文章链接
        print("----------------------------------------------------")
        response = requests.get(url, headers=headers)
        if not thumbs:
            thumb = spider.get_thumb(response)#
        else:
            #thumb = thumbs[index]#图片列表等于当前第i条图片列表
            thumb = spider.get_thumb(response)#
        print("url:", url)#打印url
        print("thumb:", thumb)#打印图片url
        print("title:", spider.get_title(response))#获取文章标题
        print("release_time:", spider.get_release_time(response))#获取发表时间
        content = spider.get_content(response)#获取正文
        print("inner_images:", spider.get_inner_images(response, content))#获取img
        print(content)
       # break