import time
import os
import json
from urllib import request
from lxml import etree
 
# Default HTTP request headers that make the crawler look like a desktop
# Chrome browser, so the site serves its normal HTML pages.
header_dict = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/63.0.3239.84 Safari/537.36"
    ),
}
def get_http(load_url, header=None):
    """Fetch ``load_url`` and return the response body decoded to text.

    The body is decoded as UTF-8 first, falling back to GBK (common on
    Chinese sites).  Any network or decode failure results in ``""`` so
    the caller can simply skip the page.

    :param load_url: absolute URL to fetch.
    :param header: optional dict of HTTP request headers.
    :return: decoded response body, or ``""`` on failure.
    """
    res = ""
    try:
        # Request requires a dict for headers; tolerate header=None
        # (the original crashed inside urllib and swallowed the error).
        req = request.Request(url=load_url, headers=header or {})
        # Context manager ensures the underlying socket is closed.
        with request.urlopen(req) as conn:
            byte_res = conn.read()
        for enc in ("utf-8", "gbk"):
            try:
                res = byte_res.decode(encoding=enc)
                break
            except UnicodeDecodeError:
                res = ""
    except Exception as e:
        # Best-effort fetch: report the problem and return "".
        print(e)
    return res
 
# Create the data directory on first run.
if not os.path.exists("./sina_data"):
    os.mkdir("sina_data")

# Output files: raw pages that could not be parsed as articles, and one
# JSON object per extracted article.
raw_file = open("./sina_data/raw.txt", "w", encoding="utf-8")
json_file = open("./sina_data/json.txt", "w", encoding="utf-8")

# Template for one extracted article record.
obj = {"title": "", "url": "", "content": ""}

saved_url = {"http://www.sina.com.cn/"}  # URLs already queued/visited
url_list = ["http://www.sina.com.cn/"]   # crawl queue (LIFO)

# --- Loop-invariant extraction rules, hoisted out of the crawl loop ---

# Title rules collected from multiple page layouts; add new rules when a
# page's title is not matched.
TITLE_XPATHS = [
    "//h1[@id='main_title']/text()",
    "//h1/text()",
    "//th[@class='f24']//font/text()",
    "/html/head/title/text()",
]

# Body-text rules collected from multiple page layouts.
CONTENT_XPATHS = [
    "//div[@id='artibody']//p/text()",
    "//td[@class='l17']//p/text()",
    "//div[@class='content']//p/text()",
    "//div[@class='article']//p/text()",
    "//div[@id='article']//p/text()",
    "//div[@class='article-body main-body']//p/text()",
    "//div[@class='textbox']//p/text()",
]

# Links ending in these extensions are binaries/media — skip them.
# (Tuple so it can be passed straight to str.endswith.)
END_FILTER = (
    ".apk", ".iso", ".jpg", ".jpeg", ".bmp", ".cdr",
    ".php", ".exe", ".dmg",
)

# Links containing any of these substrings point at sections we do not
# want to crawl (shops, games, video, login pages, ...).
FIND_FILTER = [
    "vip.", "guba.", "lottery.", "kaoshi.", "club.baby", "jiancai.", ".cn/ku/", "astro.", "match.", "games.", "zhongce", "list",
    "photo.", "yangfanbook", "zx.jiaju", "nc.shtml", "english.", "download", "chexian", "auto", "video", "comfinanceweb.shtml",
    "//sax.", "login", "/bc.", "aipai.", "vip.book", "talk.t", "slide.", "biz.finance", "blog", "comment5", "www.leju",
    "http://m.",
]

try:
    while url_list:
        # Pop the most recently discovered URL (depth-first order).
        url = url_list.pop()
        html_text = get_http(url, header_dict)
        if html_text == "":
            continue
        time.sleep(0.15)  # be polite to the server
        try:
            tree = etree.HTML(html_text)
            # URLs containing "html" but not "list" are likely articles.
            if url.find("html") >= 0 and url.find("list") < 0:
                # Try each title rule until one matches.
                title = []
                for tx in TITLE_XPATHS:
                    title = tree.xpath(tx)
                    if title:
                        break

                # Try each body rule until one matches.
                content = []
                for cx in CONTENT_XPATHS:
                    content = tree.xpath(cx)
                    if content:
                        break

                if len(title) * len(content) == 0:
                    # Missing title or body: keep the raw page for rule
                    # development (many such pages are simply not articles).
                    raw_file.write(html_text.replace("\n", "").replace("\r", ""))
                    raw_file.write("\n")
                    print("没有标题或正文" + url)
                else:
                    # Both title and body found: persist the record.
                    obj["url"] = url
                    obj["title"] = title[0]
                    obj["content"] = " ".join(content)
                    json_file.write(json.dumps(obj))
                    json_file.write("\n")

            # Collect every outgoing link on the page.
            for u in tree.xpath("//a/@href"):
                # BUG FIX: original tested "".endswith(f), which never
                # matched, so binary/media links were never filtered.
                if u.endswith(END_FILTER):
                    continue
                if any(u.find(f) >= 0 for f in FIND_FILTER):
                    continue
                if u.startswith("http") and u.find(".sina.") >= 0:
                    if u in saved_url:
                        continue
                    # BUG FIX: original did saved_url.add(url) — the page
                    # being crawled, not the discovered link — so the dedup
                    # set never grew and links could be queued repeatedly.
                    saved_url.add(u)
                    url_list.append(u)
        except Exception as e:
            # Keep crawling on a bad page, but report what went wrong
            # instead of a bare "error".
            print("error", e)
finally:
    # Close output files even if the crawl is interrupted.
    raw_file.close()
    json_file.close()