import requests
from lxml import etree
import time
import re

# 2020-12-22: Sina's per-request page count was unreliable; the time window had to be changed by hand.
# 2020-12-29: fully automated, but one duplicate record appears each time the time window rolls over.

# Extract the comment-page newsid from a news article URL.
def getCommentWeb(url):
    """Fetch a news article page and extract its comment newsid.

    The id is read from the ``<meta name="comment">`` tag, whose content
    attribute looks like ``"channel:newsid"`` — presumably; confirm against
    a live page.

    :param url: article page URL.
    :return: the newsid string, or the sentinel ``'strange web'`` when the
             page has no comment meta tag or the tag is malformed.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    page_text = requests.get(url=url, headers=headers).content
    tree = etree.HTML(page_text)
    web_list = tree.xpath('//meta[@name="comment"]/@content')
    if len(web_list) < 1:
        return 'strange web'
    # Guard against a content attribute without a ':' separator, which
    # previously raised IndexError and killed the whole crawl.
    parts = web_list[0].split(':')
    if len(parts) < 2:
        return 'strange web'
    return parts[1]

# Fetch news article URLs from the Sina search page, 10 per request (the 'num' parameter).
# Returns a list of URL strings, or the sentinel 'strange web' on an unexpected page layout.


def getUrl(page, etime):
    """Return article URLs from one Sina news search result page.

    Searches for '肺炎' between the hard-coded stime (2020-01-01) and the
    given *etime*, and keeps only links starting with ``https://news``.

    :param page: 1-based search result page number.
    :param etime: end of the search time window, e.g. '2020-06-01 23:59:59'.
    :return: list of article URL strings, or ``'strange web'`` when the
             result container div is missing.
    """
    urllist = []
    url = 'https://search.sina.com.cn/?'
    param = {
        'q': '肺炎',
        'c': 'news',
        'range': 'all',
        'time': '2020',
        'stime': '2020-01-01 00:00:00',
        'etime': etime,
        'num': '10',
        'col': '1_3',
        'page': str(page)
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    page_text = requests.get(url=url, params=param, headers=headers).text
    tree = etree.HTML(page_text)
    list1 = tree.xpath('//div[@class="box-result clearfix"]')
    if len(list1) < 1:
        return 'strange web'

    list2 = list1[0]
    # NOTE(review): an XPath starting with '//' searches the WHOLE document,
    # not just list2 — use './/a[1]/@href' if only this div was intended.
    addr = list2.xpath('//a[1]/@href')

    # Result links are assumed to start at index 8 and alternate with other
    # links (step 2). Only https://news* URLs are kept.
    start = 8
    for _ in range(10):
        # Bounds guard: pages with fewer links than expected previously
        # raised IndexError on addr[start].
        if start >= len(addr):
            break
        target = addr[start]
        if not target.startswith('https://news'):
            start += 2
            continue
        urllist.append(target)
        start += 2
    print(page)
    return urllist


# Fetch the article body and its comments for a given newsid, and append one record to Sina_data.txt.
def getComments(nesID, texturl):
    """Fetch comments (JSON API) and article body, append a record to Sina_data.txt.

    Record format: fields separated by '^&' — title, time, body text,
    shown-comment count, total participant count, then each comment.

    :param nesID: Sina comment newsid (from getCommentWeb).
    :param texturl: URL of the article page (for the body text).
    :return: the news publication time string, or None when the article
             has no comments (nothing is written in that case).
    """
    url = 'http://comment5.news.sina.com.cn/page/info?'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'}
    param = {
        'format': 'json',
        'channel': 'gn',
        'newsid': nesID
    }
    response = requests.get(url=url, params=param, headers=headers)
    list_data = response.json()
    result = list_data['result']
    if not result.get('cmntlist'):  # no comments: skip this article entirely
        return None
    cmntlist = result['cmntlist']
    count = result["count"]
    show = count["show"]
    total_people = count["total"]
    news = result['news']
    title = news['title']
    # Renamed from 'time' — the original shadowed the imported time module.
    news_time = news['time']
    with open('./Sina_data.txt', 'a+', encoding='utf-8') as fp:
        fp.write("\n")
        fp.write(title + '^&')  # '^&' is the field/comment separator
        fp.write(news_time + '^&')
        page_text = requests.get(url=texturl, headers=headers).content

        tree = etree.HTML(page_text)
        text_list = tree.xpath('//div[@class="article"]//p/text()')
        # Guard: pop(0) on an empty list raised IndexError when the article
        # div was missing or empty.
        if text_list:
            # First paragraph is dropped — presumably boilerplate; confirm.
            text_list.pop(0)
        # Concatenate paragraphs, stripping accumulated whitespace before
        # each append (preserves the original quirk of not stripping the
        # final paragraph's trailing whitespace).
        body = ''
        for text in text_list:
            body = body.strip() + text
        fp.write(body + '^&')
        fp.write(str(show) + '^&')
        fp.write(str(total_people) + '^&')
        for x in cmntlist:
            fp.write(x['content'] + '^&')
        return news_time

# The 'time' parameter is the etime (end of the search window) for the next crawl pass.
def run(time):
    """Crawl search pages backwards in time, saving comments per window.

    Repeatedly fetches 11 search pages for the current time window, saves
    each article's comments, then advances the window end to the oldest
    article time seen. Stops when the window reaches 2020-01-01 or when a
    pass makes no progress.

    :param time: initial etime string, e.g. '2020-06-01 23:59:59'.
                 (Name kept for caller compatibility; it shadows the
                 imported time module inside this function.)
    """
    # Iterative rewrite: the original recursed once per time window and
    # could hit Python's recursion limit on long crawls.
    etime = time
    while etime[0:10] != '2020-01-01':
        lasttime = etime
        for page in range(1, 12):
            urllist = getUrl(page, etime)
            if urllist == 'strange web':
                continue
            for url in urllist:
                c_addr = getCommentWeb(url)
                if c_addr == 'strange web':
                    continue
                temp = getComments(c_addr, url)
                if temp is not None:
                    lasttime = temp
        if lasttime == etime:
            # No article advanced the window (e.g. nothing had comments):
            # stop instead of looping forever on the same etime.
            break
        etime = lasttime


# The first etime is entered manually here; stime is hard-coded inside getUrl.
if __name__ == '__main__':
    # Starting end-of-window timestamp for the crawl.
    initial_etime = '2020-06-01 23:59:59'
    run(initial_etime)


