from requests_html import HTMLSession
from pprint import pprint
import json
import pymysql

# Connection settings for the local `news` MySQL database.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/deploying.
MYSQL_CONFIG = {
    'HOST': '127.0.0.1',
    'PORT': 3306,
    'USER': 'root',
    'PASSWORD': 'yangtaoo',
    'DB': 'news',
    'CHARSET': 'utf8'
}

class Mysql:
    """Thin pymysql wrapper for the news database.

    Connection parameters come from the module-level ``MYSQL_CONFIG`` dict.
    Each operation opens its own short-lived connection.
    """

    def __init__(self):
        self.host = MYSQL_CONFIG['HOST']
        self.port = MYSQL_CONFIG['PORT']
        self.user = MYSQL_CONFIG['USER']
        self.password = MYSQL_CONFIG['PASSWORD']
        self.db = MYSQL_CONFIG['DB']
        self.charset = MYSQL_CONFIG['CHARSET']

    def get_mysql_con(self):
        """Open and return a new pymysql connection; the caller owns closing it."""
        return pymysql.connect(
            host=self.host,
            port=self.port,
            user=self.user,
            password=self.password,
            db=self.db,
            charset=self.charset
        )

    # Insert news
    def insert_junshinews(self, DocID, Title, NodeId, PubTime, LinkUrl, Abstract, Editor, SourceName):
        """Insert one military-news ("junshi") record.

        Fix vs. original: the connection and cursor were never closed
        (one leaked connection per insert) and a failed execute left the
        transaction dangling — now roll back on error and always close.
        Raises whatever pymysql raises on failure, after rollback.
        """
        con = self.get_mysql_con()
        cur = con.cursor()
        sql = "insert into junshi (DocID,Title,NodeId,PubTime,LinkUrl,Abstract,Editor,SourceName) values(%s,%s,%s,%s,%s,%s,%s,%s);"
        try:
            cur.execute(sql, (DocID, Title, NodeId, PubTime, LinkUrl, Abstract, Editor, SourceName))
            con.commit()
        except Exception:
            # Don't leave a half-open transaction behind; surface the error.
            con.rollback()
            raise
        finally:
            cur.close()
            con.close()


if __name__=="__main__":
    print('start...')


    mysql = Mysql()
    session = HTMLSession()
    start_url = "http://www.xinhuanet.com/mil/index.htm"

    res = session.get(start_url)
    # print(res.text)
    # print(res.encoding)

    # print(res.text.encode('ISO-8859-1').decode('utf-8'))
    # 提取链接
    # print(res.html.links)
    # 还是按照网站结构爬取
    # 先找导航
    items = res.html.xpath("//ul[@class='nav_list']/li[@class='on']")

    # print(items)
    for item in items:
        item_name = item.xpath("//text()")[0]
        item_url = item.xpath("//@href")[0]
        # 不要第一个
        if len(item_name)>1:
            item_url = "http://www.xinhuanet.com/mil/"+item_url
            print(item_name,item_url)
            item_res = session.get(url=item_url)
            script = item_res.html.xpath("//script")[3]
            script_text = script.xpath("//text()")[0]
            pagenid=None
            pagenum = 1
            # 分页
            cnt = 30
            for lin in script_text.splitlines():
                if "getDataEvent" in lin:
                    pagenid = lin.split(":")[5].split("\"")[1]
                else:
                    pass
            print(pagenid)
            data_url = "http://qc.wa.news.cn/nodeart/list?nid="+str(pagenid)+"&pgnum="+str(pagenum)+"&cnt="+str(cnt)+"&tp=1&orderby=1"
            data_res = session.get(url=data_url)
            data = data_res.text.replace("(","").replace(")","")
            json_data = json.loads(data)
            total = json_data['totalnum']
            total_page = int(total/cnt)+1
            print(total_page)
            for page in range(1,total_page+1):
                page_url = "http://qc.wa.news.cn/nodeart/list?nid="+str(pagenid)+"&pgnum="+str(page)+"&cnt="+str(cnt)+"&tp=1&orderby=1"
                page_res = session.get(page_url)
                print(page)
                page_json_data = json.loads(page_res.text.replace("(","").replace(")",""))
                # pprint(page_json_data)
                for data in page_json_data['data']['list']:
                    # DocID,Title,NodeId,PubTime,LinkUrl,Abstract,Editor,SourceName
                    # mysql.insert_junshinews(DocID=data['DocID'],Title=data['Title'],NodeId=data['NodeId'],PubTime=data['PubTime'],LinkUrl=data['LinkUrl'],Abstract=data['Abstract'],Editor=data['Editor'],SourceName=data['SourceName'])
                    print(data['LinkUrl'])
                    article_res = session.get(data['LinkUrl'])
                    if article_res.html.xpath("//div[@id='p-detail']"):
                        detail = article_res.html.xpath("//div[@id='p-detail']/p/text()")
                        print(detail)
                    else:
                        pass
        print(item_name+'done...')
        print('-'*64)
    print('all done...')






