import requests
import json
import hashlib
import dataAdapter
import re
from bs4 import BeautifulSoup
import read_files

# Site root of the Wuhan Red Cross; every scraped path is joined onto it.
baseUrl="http://www.wuhanrc.org.cn/"
dlist = []   # accumulated donation records (捐赠列表), filled by donation()/getData_files()
glist = []   # accumulated grant/distribution records (发放列表), filled by grant()




#获取页面总数
def get_pages():
    resp = requests.get(baseUrl + "dtzx/jzgst.htm")
    bs4 = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    count = bs4.find('td',id='fanye1105').text
    count = count[-2:-1]
    return count

# Classify and dispatch one listing page (money donations, goods donations,
# and grants/distributions are routed by keywords in the article title).
def get_urls(page):
    """Scrape listing page *page* and dispatch every article on it.

    Page 0 lives at dtzx/jzgst.htm; later pages at dtzx/jzgst/<page>.htm.
    Titles containing "接收" go to donation() plus attachment parsing via
    getData_files(); titles containing "发放" or "使用" go to grant().
    Prints the page URL and the collected link dicts as progress output.
    """
    path = "dtzx/jzgst.htm" if page == 0 else "dtzx/jzgst/" + str(page) + ".htm"
    print(baseUrl + path)
    resp = requests.get(baseUrl + path)
    soup = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    # Hoisted out of the loop: one compile per page instead of per link.
    info_pattern = re.compile(r'info\S+')
    entries = []
    for anchor in soup.select(".list_n_news > li > a"):
        href = anchor.get("href")
        # Rebase the relative href onto the site root via its "info..." tail.
        detail_url = baseUrl + info_pattern.findall(href)[0]
        entries.append({
            'title': deal_value(anchor.text),
            'time': deal_value(anchor.find('span').text),
            'url': detail_url,
        })
    print(entries)
    for entry in entries:
        if re.search("接收", entry['title']):
            donation(entry['title'], entry['url'], entry['time'])
            getData_files(entry['url'], entry['time'])  # download attachments and parse them into dlist
        if re.search("发放", entry['title']) or re.search("使用", entry['title']):
            grant(entry['title'], entry['url'], entry['time'])

# Donation handler: parse the detail page's table rows into dlist.
def donation(title, url, time):
    """Parse the detail page at *url* as a donation ("don") table and append
    each row, stamped with *time*, to the module-level dlist.

    *title* is accepted for interface compatibility but is not used here.
    Does nothing when deal_detail() finds no matching table (returns None).
    """
    rows = deal_detail(url, "don")
    if rows is not None:
        for row in rows:
            row['时间'] = time
            dlist.append(dataAdapter.donationToJson(row))

# Grant/distribution handler: parse the detail page's table rows into glist.
def grant(title, url, time):
    """Parse the detail page at *url* as a grant ("grant") table and append
    each row, stamped with *time*, to the module-level glist.

    *title* is accepted for interface compatibility but is not used here.
    Does nothing when deal_detail() finds no matching table (returns None).
    """
    rows = deal_detail(url, "grant")
    if rows is not None:
        for row in rows:
            row['时间'] = time
            glist.append(dataAdapter.grantToJson(row))




# Detail-page processor: fetch the page and hand its tables to get_data().
def deal_detail(url, type):
    """Download the detail page at *url* and extract its table rows.

    *type* is "don" or "grant" and is forwarded to get_data() for
    table-type disambiguation.  Returns the parsed row dicts, or None
    when the page holds no matching table.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content.decode("utf-8"), "lxml")
    # Attachment download is handled separately (see getData_files/save_files).
    return get_data(soup, type)




# Extract tabular data from a detail page.
def get_data(bs4,type):
    """Parse the page's first <tbody> into a list of row dicts.

    bs4  -- BeautifulSoup document of the detail page.
    type -- "don" (donation) or "grant"; used to reject tables of the
            wrong kind by inspecting the header row.

    Returns a list of {header: value} dicts, or None when the page has no
    <tbody> or the header row shows the table is the wrong type.
    NOTE: only the first table is processed — the loop returns at the end
    of its first iteration.
    """
    tables = bs4.find_all('tbody')
    if len(tables)>0:
        for table in tables:
            # Header row: the first <tr>'s cells become the dict keys.
            items = table.find("tr").find_all("td")
            mcloum = 0  # unused; kept as-is
            titles = []
            for item in items:
                title = deal_value(item.text)
                # Second-stage type check: a header starting with "捐赠"
                # (but not "捐赠额") marks a donation table — wrong in
                # grant mode; a header starting with "发放" marks a grant
                # table — wrong in donation mode.  Returning None here
                # discards the whole page for this handler.
                if re.match("捐赠",title) != None and re.match("捐赠额",title) == None and type=="grant":
                    return
                if re.match("发放",title) != None and type == "don":
                    return
                titles.append(title)
            # Data rows: everything after the header row.
            list = table.find_all("tr")[1:]
            mtitle = None  # text of the most recent rowspan cell, carried down
            ths = []
            for i in list:
                # A rowspan cell starts a merged group; remember its text so
                # the following shorter rows can reuse it as their first value.
                if i.find('td').get("rowspan") != None:
                    mtitle = i.find('td').text
                tds = i.find_all("td")
                values = []
                # Short rows (inside a merged group) are padded with the
                # remembered rowspan text so they still align with titles.
                if len(tds) < len(titles):
                    values.append(mtitle)
                for td in tds:
                    v = deal_value(td.text)
                    values.append(v)
                th = {}
                for k in range(0, len(values)):
                    th[titles[k]] = values[k]
                ths.append(th)
            return ths




# Download the attachment files linked from a detail page.
def save_files(bs4, refer):
    """Download every attachment linked inside the article's content form.

    bs4   -- BeautifulSoup document of the detail page.
    refer -- URL of the page, sent as the Referer header for the download.

    Returns the list of local paths ("files/<name>") that were written
    (possibly empty when no matching form was found).  Returns None when
    the content form exists but holds no attachment <ul> — preserved so
    callers can distinguish that case.  Raises requests.HTTPError on a
    failed download.
    """
    file_names = []
    anchors = []
    for form in bs4.find_all('form'):
        if form.get('name') == "_newscontent_fromname":
            holder = form.find('ul')
            if holder is None:
                return  # content form present but no attachment list
            anchors = holder.find_all('a')

    targets = []
    for anchor in anchors:
        href = anchor.get('href')
        targets.append({
            # The first 6 characters of the href are a relative prefix that
            # is dropped before rebasing on baseUrl — TODO confirm the
            # prefix is always 6 chars on the live site.
            'url': baseUrl + href[6:],
            'filename': anchor.text,
        })

    header = {
        'Connection': 'close',
        'Referer': refer,
    }

    for target in targets:
        res = requests.get(target['url'], headers=header)
        res.raise_for_status()
        path = "files/" + target['filename']
        # 'with' guarantees the handle is closed even if a write fails
        # (the original leaked the handle on error).
        with open(path, 'wb') as fh:
            for chunk in res.iter_content(10000):
                fh.write(chunk)
        file_names.append(path)
    return file_names


# Extract donation rows from a detail page's attached files.
def getData_files(url, time):
    """Download the attachments of the page at *url* and parse each PDF or
    Excel file into donation records, appended to the module-level dlist
    with the given *time* stamp.  Files of other types are skipped, and
    nothing happens when save_files() signals a missing attachment list.
    """
    resp = requests.get(url)
    soup = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    saved = save_files(soup, url)
    if saved is None:
        return
    for name in saved:
        if "pdf" in name:
            dlist.extend(read_files.read_pdf(name, "武汉红十字基金会", time))
        elif "xls" in name:
            dlist.extend(read_files.read_excel(name, "武汉红十字基金会", time))


# Normalise a scraped text value.
def deal_value(v):
    """Clean whitespace artifacts out of scraped text.

    A value that is exactly a zero-width space becomes "无"; otherwise
    non-breaking spaces, ideographic spaces, ASCII spaces and CR/LF are
    removed.  Zero-width spaces embedded in longer strings are left
    untouched (matches the original behavior).
    """
    if v == '\u200b':
        v = "无"
    # One translate() pass replaces the chain of .replace() calls.
    return v.translate({ord(ch): None for ch in u'\xa0\u3000 \r\n'})





if __name__ == '__main__':
    # Crawl every listing page, then dump the accumulated records to disk.
    page_count = get_pages()
    for page in range(int(page_count)):
        get_urls(page)
    print(dlist)
    print(glist)
    print(len(dlist))
    print(len(glist))
    with open("dlist.txt", 'w', encoding='utf-8') as file_obj:
        file_obj.write(str(dlist))
    # BUG FIX: the original wrote the donation list (contentD) here too,
    # so glist.txt never actually contained the grant data.
    with open("glist.txt", 'w', encoding='utf-8') as file_obj:
        file_obj.write(str(glist))
