import requests
import json
import re
import datetime
from lxml import etree
import time
import redis

# Scrape NetEase (163.com) news feeds -- finance, entertainment, tech, lifestyle -- into Redis.


# HTTP headers sent with every request.  The Cookie was captured from a live
# browser session on 163.com; it may expire and need refreshing.
headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Cookie" : "__root_domain_v=.163.com; _qddaz=QD.742212123935417; wyy_uid=f9ffa1ce-f0c2-4c86-a43d-8f938f720c6d; locale=zh_CN; _gcl_au=1.1.688806778.1712123941; _ntes_nnid=94a445a4a5520a72a44236ea4de1ca49,1713104236379; _ntes_nuid=94a445a4a5520a72a44236ea4de1ca49; pver_n_f_l_n3=a; Hm_lvt_f8682ef0d24236cab0e9148c7b64de8a=1715084584; _ntes_origin_from=; s_n_f_l_n3=b46f376ee3a5b8811715139192305; _antanalysis_s_id=1715139262013; ne_analysis_trace_id=1715139520290; vinfo_n_f_l_n3=b46f376ee3a5b881.1.5.1713704217201.1715098368987.1715139554805"
}
# Shared accumulator of scraped articles: appended to by info(), serialized
# to Redis and cleared per category by exec().
datalist = []
def info(urls, isNowDay):
    """Fetch one JSONP news-list page, filter its items, and append the
    surviving articles (with full body HTML) to the global ``datalist``.

    Parameters:
        urls: JSONP list URL (``...?callback=data_callback``).
        isNowDay: when True, skip the same-day filter and accept items
            from any date; when False, keep only items dated today.

    Returns None; results accumulate in ``datalist``.
    """
    url = urls
    print("------------------------------", url)

    # timeout so a stalled server cannot hang the crawl forever
    resp = requests.get(url=url, headers=headers, timeout=10)
    print(resp.status_code)
    if resp.status_code != 200:
        # Fetch failed -- leave datalist untouched.
        resp.close()
        return
    data = resp.text
    resp.close()

    # Strip the JSONP wrapper ``data_callback( ... )``.  Only the leading
    # call and the LAST closing parenthesis are removed.  The original code
    # did data.replace(")", ''), which deleted every ')' in the payload and
    # corrupted any title/content containing parentheses.
    start = data.find("data_callback(")
    if start != -1:
        data = data[start + len("data_callback("):]
    end = data.rfind(")")
    if end != -1:
        data = data[:end]

    dirData = json.loads(data)

    # Today's date in the feed's own format, e.g. "05/07/2024"
    # (feed items look like "time":"05/07/2024 14:51:21").
    current_time_str = datetime.datetime.now().strftime("%m/%d/%Y")

    for item in dirData:
        # item: one news entry from the feed.
        print(item['time'])
        # Keep the item only if the date filter passes AND it has a cover image.
        if ((isNowDay or item['time'].find(current_time_str) != -1)
                and len(item['imgurl'].strip()) != 0):
            print(item['imgurl'].strip())
            data = {
                "coverUrl": item['imgurl'].strip(),
                "title": item['title'],
                "time": item['time'],
                "content": ''
            }
            # Fetch the article page itself and extract its body HTML.
            # Each detail response is closed explicitly; the original only
            # closed the last one because `resp` was rebound in the loop.
            detail = requests.get(url=item['docurl'], headers=headers, timeout=10)
            try:
                html = etree.HTML(detail.text)
            finally:
                detail.close()
            content = html.xpath("//div[@class='post_body']")
            if not content:
                # No article body found on the page -- skip this item.
                continue
            data['content'] = etree.tostring(
                content[0], encoding='utf-8', pretty_print=True, method="html"
            ).decode('utf-8')

            # Add to the shared result set.
            datalist.append(data)



def getUrl(name, number):
    """Build the JSONP list URL for a news category and page suffix.

    Every category except "财经" separates a non-empty page number from the
    file name with an underscore (e.g. ``_02``); "财经" appends it directly.
    Raises KeyError for an unknown category name.
    """
    if number and name != "财经":
        number = "_" + number
    templates = {
        "财经": "https://money.163.com/special/00259BVP/news_flow_index{}.js?callback=data_callback",
        "娱乐": "https://ent.163.com/special/000380VU/newsdata_index{}.js?callback=data_callback",
        "科技": "https://tech.163.com/special/00097UHL/tech_datalist{}.js?callback=data_callback",
        "生活": "https://travel.163.com/special/00067VEJ/newsdatas_travel{}.js?callback=data_callback",
    }
    return templates[name].format(number)


def exec(name, isNotNowDay):
    """Crawl list pages of one category until ~50 articles are collected
    (or the feed runs dry), then store the batch in Redis and reset the
    shared ``datalist`` buffer.

    Parameters:
        name: category display name, a key of the module-level ``keys`` dict.
        isNotNowDay: forwarded to info(); True disables the same-day filter.
    """
    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    target = 50
    page = 1
    previous_count = 0
    while len(datalist) < target:
        # Page 1 has no suffix; pages 2-9 are zero-padded ("02".."09"),
        # pages 10+ are used as-is.
        if page == 1:
            suffix = ""
        elif page < 10:
            suffix = "0" + str(page)
        else:
            suffix = str(page)
        info(getUrl(name, suffix), isNotNowDay)
        page += 1
        if previous_count == len(datalist):
            # Nothing new was scraped -- the feed is exhausted.
            break
        previous_count = len(datalist)
        # Throttle: pause one second between page fetches.
        time.sleep(1)
    # Persist the batch under the category's English Redis key, then
    # empty the shared buffer for the next category.
    client.set(keys[name], json.dumps(datalist))
    datalist.clear()



# Category display names mapped to the Redis keys their results are stored
# under (read by exec()).
keys = {
    "生活":"Life",
    "娱乐":"Recreation",
    "财经":"Finance",
    "科技":"Science"
}

# Crawl every category.  False = keep only items published today.
# (The original bound the category list to the name `list`, shadowing the
# builtin; iterate the literal directly instead.)
for category in ["生活","娱乐","财经","科技"]:
    exec(category, False)




    




