# -*- coding: utf-8 -*-

import re
from SingleNodeSpider.spiders.base_spider import baseSpider
from scrapy import cmdline
from lxml import etree

# (TODO) Class naming convention: site name + "Spider"
class lareineSpider(baseSpider):
    """Spider for www.lareine.com.kh, driven by the site's RSS feed.

    NOTE(review): inherits baseSpider, which presumably calls
    get_item_urls() on each feed page and the get_* extractors on each
    article page -- confirm against base_spider.py.
    """
    media_id = 194
    # (TODO) Spider name must match the class name
    name = 'lareineSpider'
    # (TODO) Add the site's domain to allowed_domains
    allowed_domains = ['www.lareine.com.kh']
    # (TODO) Add the site's RSS feed URL(s); keep the channel label unchanged
    urls_channel = [
        ("https://www.lareine.com.kh/news/feed/", "未分类频道")
    ]


    def get_item_urls(self, response):
        """Extract article URLs from the RSS feed page.

        Side effect: caches each item's first inline image URL on
        self.thumbs so get_thumbs() can reuse them.
        """
        doc = etree.HTML(response.body)
        self.thumbs = [img.get('src')
                       for img in doc.xpath('//encoded/p[img][1]/img[1]')]
        # In this feed's markup the URL sits in the element's tail text.
        # tail may be None, so guard before stripping.
        item_urls = [(link.tail or '').strip('\n\t')
                     for link in doc.xpath('//encoded/p[img][1]/../../link')]
        return item_urls


    def get_thumbs(self, response):
        """Return the cached thumbnail URLs with a thumb=1 query parameter."""
        # (TODO) thumbs must carry the thumb=1 parameter
        thumbs = [u + "&thumb=1" if "?" in u else u + "?thumb=1"
                  for u in self.thumbs]
        return thumbs

    def get_thumb(self, response):
        # (TODO) If thumbs cannot be obtained, grab the first image from the
        # article page here instead.
        return ''


    def get_title(self, response):
        """Return the article headline (first <h1> on the page)."""
        title = response.xpath('//h1/text()').extract()[0]
        return title


    def get_author(self, response):
        # (TODO) Author name must match the source site's name.
        # Fixed typo: was "Lariene"; the site is lareine.com.kh -> "Lareine".
        return "Lareine"


    def get_release_time(self, response):
        """Return the publish time from the first <time datetime="..."> tag."""
        # Parse into a local variable instead of shadowing `response`.
        doc = etree.HTML(response.body)
        ctime = doc.xpath('//time')[0].get('datetime')
        return ctime


    def get_content(self, response):
        """Return the article body HTML with presentation attributes stripped,
        keeping only the src attribute on images."""
        content = "".join(
            response.xpath('//div[@class="td-post-content tagdiv-type"]/p').extract())
        # Patterns to drop; adjust per site so only src survives.
        re_words = ['<a href=.*?>', 'srcset=".*?"', 'sm-src=".*?"', 'big-src=".*?"', 'class=".*?"', 'alt=".*?"',
                    'width=".*?"', 'height=".*?"', 'data-lazy-', 'data-lazy-sizes=', 'sizes=".*?"', 'type=".*?"',
                    'data-recalc-dims=".*?"']
        # Strip each pattern directly; equivalent to the previous
        # findall-then-replace round-trip without the intermediate list.
        for pattern in re_words:
            content = re.sub(pattern, '', content, flags=re.S)
        return content

    def get_inner_images(self, response, content=''):
        # (TODO) Default behavior unchanged: collect img/@src automatically.
        # etree.HTML('') yields no document, so short-circuit empty input.
        if not content:
            return []
        doc = etree.HTML(content)
        inner_images = doc.xpath('.//img/@src')
        return inner_images

if __name__ == "__main__":
    # Launch this spider directly via the scrapy CLI entry point.
    cmdline.execute(["scrapy", "crawl", "lareineSpider"])