from database.db_handler import MysqlHander
from common.my_http import MyHttp
import urllib.request
from bs4 import BeautifulSoup
from common.macro_data import MacroData
import glob
import jieba
import re
import threading
import hashlib

'''
Macro data: scrapers for State Council (gov.cn) policy and news listings.
'''

class GuowuyuanData:
    """Scrape policy/news listings from the State Council (gov.cn) sites.

    Each scraped item (date, title, URL) is stored through
    MacroData.add_macro_data with source label "中国国务院".
    """

    def __init__(self):
        # Shared sink for every scraped record.
        self.macro = MacroData()

    def guowuyuan_lishi(self):
        """Crawl the paged historical policy archive (pages 0-79)."""
        for page in range(0, 80):
            url = "http://sousuo.gov.cn/column/30469/" + str(page) + ".htm"
            bs4_data = MyHttp.bs4_utf8_data(url)
            for listing in bs4_data.find_all(name="ul", attrs={'class': 'listTxt'}):
                for entry in listing.find_all(name="h4"):
                    link = entry.find("a")
                    url_data = link.attrs["href"]
                    # Normalize full-width parentheses to ASCII for storage.
                    title_data = link.text.replace(u"（", "(").replace(u"）", ")")
                    # Site formats dates as "YYYY.MM.DD"; store as "YYYY-MM-DD".
                    date_data = entry.find("span").text.replace(".", "-")
                    print(url_data + title_data + date_data)
                    self.macro.add_macro_data(date_data, 100, title_data, url_data, "中国国务院")

    def guowuyuan(self):
        """Crawl the current policy index page."""
        url = "http://www.gov.cn/zhengce/index.htm"
        bs4_data = MyHttp.bs4_utf8_data(url)
        for entry in bs4_data.find_all(name="div", attrs={'class': 'latestPolicy_left_item'}):
            link = entry.find("a")
            url_data = link.attrs["href"]
            title_data = link.text.replace(u"（", "(").replace(u"）", ")")
            date_data = entry.find("span").text.replace(".", "-")
            print(url_data + title_data + date_data)
            self.macro.add_macro_data(date_data, 100, title_data, url_data, "中国国务院")

    def guowuyuan_dongtai(self):
        """Crawl the State Council news feed (guowuyuan/index.htm).

        Fix: the original extracted href/title from the second anchor by
        string-replacing its str() serialization, which breaks whenever
        attribute order or extra attributes differ; both branches now read
        the parsed tag's .attrs / .text directly.
        """
        url = "http://www.gov.cn/guowuyuan/index.htm"
        bs4_data = MyHttp.bs4_utf8_data(url)
        body = bs4_data.find(name="div", attrs={'class': 'zl_channel_body'})
        for entry in body.find_all(name="li"):
            anchors = entry.find_all(name="a")
            # Some rows carry two anchors; the second holds the real link text.
            link = anchors[1] if len(anchors) > 1 else anchors[0]
            url_data = link.attrs["href"]
            title_data = link.text.replace(u"（", "(").replace(u"）", ")")
            # The publication date is encoded in the URL path:
            # .../<...>/<YYYY-MM>/<DD>/... — join path segments 4 and 5.
            parts = url_data.split("/")
            if len(parts) <= 5:
                # Unexpected URL shape — skip rather than raise IndexError.
                continue
            date_data = parts[4] + "-" + parts[5]
            print(url_data + title_data + date_data)
            # Special-topic ("zhuanti") pages are not dated policy items.
            # Membership test also catches a match at index 0, which the
            # original `find(...) > 0` missed.
            if "zhuanti" in url_data:
                continue
            self.macro.add_macro_data(date_data, 100, title_data, url_data, "中国国务院")

    def get_data(self):
        """Run the scrapers for the live index and the news feed.

        guowuyuan_lishi (the 80-page archive) is intentionally not run here;
        call it explicitly for a one-off historical backfill.
        """
        self.guowuyuan()
        self.guowuyuan_dongtai()

def guowuyuan_api():
    """Module entry point: scrape all State Council sources once."""
    GuowuyuanData().get_data()

# Allow running this scraper directly as a script.
if __name__ == '__main__':
    guowuyuan_api()
