import requests
from lxml import etree
import re
def demo1():
    """Fetch page 1 of jstv.com search results for '新冠肺炎疫情' and dump
    the raw HTML response to ./test1.html."""
    search_url = 'https://so.jstv.com/?'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    query = {
        'keyword': '新冠肺炎疫情',
        'page': '1',
    }
    resp = requests.get(url=search_url, params=query, headers=headers)
    with open('./test1.html', 'w', encoding='utf-8') as out:
        out.write(resp.text)

def demo2():
    """Fetch one jstv.com news article and print the list of paragraph
    text nodes found in its content div."""
    article_url = 'http://news.jstv.com/a/20201204/1607071977419.shtml'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    resp = requests.get(url=article_url, headers=headers)
    doc = etree.HTML(resp.content)
    # xpath returns a list of text fragments, one per <p> text node.
    paragraphs = doc.xpath('/html/body/div[@class="bd clearfix"]/div[1]//div[@class="content"]//p/text()')
    print(paragraphs)

def demo3():
    """Fetch a Sina news article and return its body text as one string.

    Returns:
        str: the concatenated paragraph texts (each stripped of surrounding
        whitespace), or '' if the page yields no paragraphs.

    Fixes:
        - The original did ``text_list.pop(0)`` unconditionally, which raises
          IndexError when the xpath matches nothing (e.g. page layout change).
        - String accumulation via ``result = result.strip() + text`` is
          quadratic; replaced with a single ``str.join``.
    """
    url = 'https://news.sina.com.cn/o/2020-12-29/doc-iiznezxs9504163.shtml'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    page_text = requests.get(url=url, headers=headers).content

    tree = etree.HTML(page_text)
    text_list = tree.xpath('//div[@class="article"]//p/text()')
    if not text_list:
        # No paragraphs found -- previously this crashed on pop(0).
        return ''
    # The first entry is boilerplate/whitespace, so skip it; strip each
    # remaining fragment and join in one linear pass.
    return ''.join(text.strip() for text in text_list[1:])



def getLiZhi():
    """Crawl jstv.com search results for '新冠疫情' (pages 1-208) and append
    every article found to ./LiZhi_data.txt.

    Record format per article (single logical line):
        <time>@timeDone<title>@titleDone<paragraph...>@done\n

    Fixes:
        - The original indexed ``title[0]``, ``url_son[0]`` and ``time[0]``
          without checking the xpath result, so one malformed <li> or one
          article missing a timestamp raised IndexError and killed the whole
          208-page crawl; such items are now skipped.
    """
    url = 'https://so.jstv.com/?'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    with open('./LiZhi_data.txt', 'a+', encoding='utf-8') as fp:
        for page in range(1, 209):
            param = {
                'keyword': '新冠疫情',
                'page': str(page),
            }
            page_text = requests.get(url=url, params=param, headers=headers).content
            tree = etree.HTML(page_text)
            # All links to article content pages on this search-result page.
            li_list = tree.xpath('/html/body//div[@class="lzxw_lxz"]/ul/li')

            for li in li_list:
                # Note: xpath always returns a list.
                title = li.xpath('./span/a/text()')   # title taken from the search page
                url_son = li.xpath('./span/a/@href')  # article content-page URL
                if not title or not url_son:
                    # Malformed <li> (no link/title) -- skip instead of IndexError.
                    continue

                page_text_son = requests.get(url=url_son[0], headers=headers).content
                tree_son = etree.HTML(page_text_son)
                text_list = tree_son.xpath('/html/body/div[@class="bd clearfix"]/div[1]//div[@class="content"]//p/text()')
                if len(text_list) <= 1:
                    continue
                text_list.pop(0)  # first entry is always an empty string
                time = tree_son.xpath('//span[@class="time"]/text()')
                if not time:
                    # Article without a timestamp -- skip instead of IndexError.
                    continue
                fp.write(time[0] + '@timeDone')
                fp.write(title[0] + '@titleDone')
                # Batch the paragraph writes instead of one call per fragment.
                fp.writelines(text_list)
                fp.write('@done\n')

