#https://www.business-cambodia.com/latest-news
"""
Python 3 spider; note that the site's obfuscated HTML markup has to be
handled when parsing.
"""

import re
import requests,random
from lxml import etree

class baseSpider:
    """Common base for site spiders; concrete spiders supply the scraping hooks."""

class businesscambodiaSpider(baseSpider):
    """Spider for https://www.business-cambodia.com/latest-news.

    Scrapes the "latest news" listing page for article URLs and their
    thumbnails, then extracts title, author, release time, body content
    and inline image URLs from each article page.
    """

    media_id = 194
    # Spider name, kept identical to the crawled site's name.
    name = 'business-cambodia'
    # Domains the spider is allowed to crawl.
    allowed_domains = ['www.business-cambodia.com']
    # (channel url, channel name) pairs; the channel-name literal means
    # "uncategorized channel" and is a runtime value — do not translate it.
    urls_channel = [("https://www.business-cambodia.com/latest-news", "未分类频道")]

    def get_item_urls(self, response):
        """
        Get the article URL list from the listing page.

        Side effect: caches the thumbnail URLs on ``self.thumbs`` so that
        ``get_thumbs`` can return them without another request.  Both lists
        come from the same ``<a>`` elements and so stay index-aligned as
        long as every anchor contains an ``<img>``.

        Args:
            response (requests.Response): listing page response
        Returns:
            list: article URLs
        """
        res = etree.HTML(response.content)
        self.thumbs = [img.get("src")
                       for img in res.xpath('.//div[@class="page_image_list"]/a/img')]
        return [a.get("href")
                for a in res.xpath('.//div[@class="page_image_list"]/a')]

    def get_thumbs(self, response):
        """
        Get the thumbnail list matching the article URL list.

        Args:
            response (requests.Response): listing page response (unused;
                thumbnails were cached by ``get_item_urls``)
        Returns:
            list: thumbnail URLs, each carrying a ``thumb=1`` query parameter
        """
        # Append thumb=1 with the separator that matches whether the URL
        # already has a query string.
        return [u + "&thumb=1" if "?" in u else u + "?thumb=1"
                for u in self.thumbs]

    def get_thumb(self, response):
        """
        Fallback: get an article page's first image as its thumbnail.

        Args:
            response (requests.Response): article page response
        Returns:
            str: URL from the ``twitter:image`` meta tag, or ``''`` when
                the tag is absent
        """
        res = etree.HTML(response.content)
        metas = res.xpath('.//meta[@name="twitter:image"]')
        # Guard against pages without the meta tag instead of raising
        # IndexError.
        return metas[0].get('content') if metas else ''

    def get_title(self, response):
        """
        Get the article title from an article page.

        Args:
            response (requests.Response): article page response
        Returns:
            str: title text with all spaces removed
        """
        root = etree.HTML(response.text)
        # NOTE(review): spaces are stripped wholesale — presumably because
        # Khmer titles render with spurious spaces; confirm this is also
        # wanted for English titles.
        return root.xpath('.//h2')[0].text.replace(' ', '')

    def get_author(self, response):
        """Return the author, fixed to the source site's name."""
        # Reuse ``name`` so author and spider name can never drift apart.
        return self.name

    def get_release_time(self, response):
        """
        Get the article release time from an article page.

        Args:
            response (requests.Response): article page response
        Returns:
            str: release-time text
        """
        root = etree.HTML(response.text)
        # The second <a> inside the meta nav bar holds the date text.
        return root.xpath('.//ul[@class="nav meta"]/li/a')[1].text

    def get_content(self, response):
        """
        Get the article body from an article page.

        Requirements:
            1. body text plus images
            2. ads filtered out
            3. <img> tags keep only the src attribute (srcset, data-src,
               class etc. are stripped)

        Args:
            response (requests.Response): article page response
        Returns:
            content (str): cleaned article HTML
        """
        root = etree.HTML(response.text)
        nodes = (root.xpath('.//div[@class="post--content"]/p')
                 + root.xpath('.//div[@class="post--content"]/img'))
        content = "".join(etree.tounicode(node) for node in nodes)
        # Patterns whose matches are removed so that only src survives on
        # <img>, and scripts/ad wrappers disappear.  Matches are collected
        # first, then every occurrence is deleted.
        re_words = ['<a href=.*?>',
                    'srcset=".*?"',
                    'sm-src=".*?"',
                    'big-src=".*?"',
                    'class=".*?"',
                    'alt=".*?"',
                    'width=".*?"',
                    'height=".*?"',
                    'sizes=".*?"',
                    '<script>.*?</script>',
                    '<div>.*?</div>',
                    "  "
                    ]
        useless_list = []
        for pattern in re_words:
            useless_list.extend(re.findall(pattern, content, re.S))
        for fragment in useless_list:
            content = content.replace(fragment, '')
        return content

    def get_inner_images(self, response, content=''):
        """
        Extract every inline image URL from the cleaned article HTML.

        Args:
            response (requests.Response): unused, kept for interface parity
            content (str): article HTML produced by ``get_content``
        Returns:
            list: image URLs found in ``src="..."`` attributes
        """
        # get_content leaves only the src attribute on <img>, so a regex
        # over the cleaned HTML is sufficient; the previous lxml-based
        # duplicate used a wrong XPath (.//img/src) and its result was
        # discarded, so it has been removed.
        return re.findall(r'(?<=src=").*?(?=")', content, re.S)
# Pool of User-Agent strings (desktop and mobile browsers) used to vary
# request headers and reduce the chance of being blocked by the site.
user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6:
	"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",

]

# Request headers shared by all requests in this run.
# NOTE: random.choice runs ONCE at module import, so every request made by
# this process reuses the same User-Agent.  Rebuild this dict per request if
# per-request rotation is ever needed.  (The previously commented-out
# browser headers were dead code and have been removed.)
headers = {
    'User-Agent': random.choice(user_agent),
}


if __name__ == "__main__":
    spider = businesscambodiaSpider()
    # Fetch the listing page once; it yields both article URLs and thumbnails.
    response = requests.get(spider.urls_channel[0][0], headers=headers)

    item_urls = spider.get_item_urls(response)   # article links
    thumbs = spider.get_thumbs(response)         # thumbnails, index-aligned

    for index, url in enumerate(item_urls):
        print("----------------------------------------------------")
        response = requests.get(url, headers=headers)
        # Prefer the listing-page thumbnail; fall back to the article's own
        # first image when the listing supplied none.  The bounds check also
        # guards against thumbs being shorter than item_urls, which the old
        # all-or-nothing check would turn into an IndexError.
        if index < len(thumbs):
            thumb = thumbs[index]
        else:
            thumb = spider.get_thumb(response)
        print("url:", url)
        print("thumb:", thumb)
        print("title:", spider.get_title(response))
        print("release_time:", spider.get_release_time(response))
        content = spider.get_content(response)
        print("inner_images:", spider.get_inner_images(response, content))
        print(content)