#coding:utf-8
# macOS: reuse an already-logged-in Chrome session so login cookies stay valid.
# Reference: https://www.cnblogs.com/csubcc/p/15728380.html

from selenium import webdriver
from selenium.webdriver import ChromeOptions
from lxml import etree
import requests
import time
import csv
import os
import html as ht
from lxml import html
import re

# Path to the ChromeDriver binary (Apple-silicon build).
chromeDriverPath='./chromedriver_mac_arm64/chromedriver'
# Selenium WebDriver handle; assigned in the __main__ block below.
bro=None
# Desktop-Chrome User-Agent so requests.get() receives the normal page markup.
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}

def txt_clean(txt):
    """Collapse runs of newlines followed by whitespace into a single newline.

    Args:
        txt: raw article text extracted from the page.

    Returns:
        The cleaned text.
    """
    print('清洗')
    # Raw string fixes the invalid '\s' escape of the original pattern
    # (a SyntaxWarning on modern Python); behavior is unchanged.
    txt = re.sub(r'\n+\s+', '\n', txt)
    return txt
def remove_node(HTML):
    """Strip all <script> and <style> nodes from an HTML document string.

    Args:
        HTML: raw HTML source of a page.

    Returns:
        The serialized document with script/style removed and HTML entities
        unescaped back to readable characters.
    """
    tree = html.fromstring(HTML)
    # Remove every script/style element so only visible-content markup remains.
    for node in tree.xpath('//script | //style'):
        node.getparent().remove(node)

    # tostring() returns bytes -> decode() to str, then unescape entities once
    # (the original computed ht.unescape twice: for the print and the return).
    cleaned = ht.unescape(html.tostring(tree).decode())
    print(cleaned)
    return cleaned

def mymkdir(filepath):
    """Create directory `filepath` (and any missing parents) if absent.

    Args:
        filepath: directory path; surrounding whitespace and trailing
            backslashes are stripped before use.

    Returns:
        True if the directory was created, False if it already existed.
    """
    # Dropped the leftover debug print(type(filepath)) of the original.
    filepath = filepath.strip().rstrip("\\")
    if os.path.exists(filepath):
        print(filepath + ' 目录已存在')
        return False
    os.makedirs(filepath)
    print('创建成功')
    return True
    
def saveNews(lanmu,itemList):
    """Write scraped news items to <YYYY/MM/DD>/wangyi/<lanmu>.csv.

    Args:
        lanmu: section name, used as the CSV filename.
        itemList: list of dicts with keys 'title', 'content', 'url'.
    """
    print(lanmu,'存储')
    timepath = time.strftime('%Y/%m/%d/') + 'wangyi/'
    print(timepath)
    mymkdir(timepath)
    datapath = timepath + lanmu + '.csv'
    fieldnames = ['title', 'content', 'url']
    # 'with' guarantees the file is closed (the original leaked the handle);
    # explicit utf-8 keeps the Chinese text portable across platforms.
    with open(datapath, mode='w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        if itemList:
            writer.writerows(itemList)



def getDetailsInfo(title,url):
    """Fetch one news detail page and extract its body text.

    Args:
        title: article title (passed through into the result).
        url: article detail-page URL.

    Returns:
        dict with keys 'title', 'content', 'url'.
    """
    print("详情请求地址=",url)
    # timeout so one stuck server cannot hang the whole crawl indefinitely
    html = requests.get(url, headers=header, timeout=10).text
    html = remove_node(html)
    contentTree = etree.HTML(html)
    print('details title:',title)
    cInfo = contentTree.xpath('//*[@id="content"]/div[2]//text()')
    # join instead of repeated += string concatenation
    content = ''.join(part.strip() + '\r\n' for part in cInfo)
    content = txt_clean(content)
    print('details content:',content)
    resultContent = {'title':title,'content':content,'url':url}
    return resultContent


def getTitleUrl(lurl):
    """Open one section's listing page in the browser and scrape its articles.

    Args:
        lurl: dict with 'title' (section name) and 'url' (listing page URL).
    """
    print('进入栏目处理=',lurl)
    print('进入url处理=',lurl.get('url'))
    bro.get(lurl.get('url'))
    time.sleep(1)
    tree = etree.HTML(bro.page_source)
    news_divs = tree.xpath('//div[@class="ndi_main"]/div')
    if not news_divs:
        print(lurl.get('url'),'解析为空')
        return
    # Collect title/content/url dicts for every article that has a title link.
    collected = []
    for news_div in news_divs:
        names = news_div.xpath('.//div[@class="news_title"]/h3/a/text()')
        if not names:
            continue
        name = names[0]
        link = news_div.xpath('.//div[@class="news_title"]/h3/a/@href')[0]
        print(name)
        collected.append(getDetailsInfo(name, link))
    if len(collected) > 0:
        print(lurl.get('title'),'长度=',len(collected))
        saveNews(lurl.get('title'),collected)



if __name__=='__main__':
    # Attach to an already-running Chrome (started with
    # --remote-debugging-port=9222) so the logged-in session cookies are reused.
    options = ChromeOptions()
    options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
    bro = webdriver.Chrome(executable_path=chromeDriverPath, chrome_options=options)

    # Sections to crawl: title is the CSV filename, url is the listing page.
    urls=[{'title':'国内','url':'https://news.163.com/domestic'},
          {'title':'国际','url':'https://news.163.com/world'},
          {'title':'数读','url':'https://data.163.com/special/datablog'},
          {'title':'军事','url':'https://war.163.com'},
          {'title':'航空','url':'https://news.163.com/air'},
          ]
    for u in urls:
        getTitleUrl(u)

    # Bug fix: the original `bro.close` (no parentheses) was a no-op attribute
    # access; actually close the browser window.
    bro.close()
    print("结束")

