# NOTE: watch out for the hard-coded download path inside save_files
import requests
import json
import hashlib
from Whspider import dataAdapter
import re
from bs4 import BeautifulSoup
from Whspider import read_files


baseUrl="http://www.wuhanrc.org.cn/"
subUrl="dtzx/jzgst"
# subUrl="dtzx"
dlist = []   #捐赠列表
glist = []   #发放列表
urls  = []   #urls列表

# Get the total number of listing pages.
def get_pages():
    """Fetch the listing index and return the total page count as a string.

    The count lives in the pager cell (td#fanye1105).  The original code
    sliced a single character (``count[-2:-1]``), which breaks for counts
    of 10 or more pages; extract the last run of digits instead.
    """
    resp = requests.get(baseUrl + "dtzx/jzgst.htm")
    soup = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    pager_text = soup.find('td', id='fanye1105').text
    # NOTE(review): assumes the page total is the last number in the pager
    # text (the old slice grabbed the second-to-last character) — confirm
    # against the live site markup.
    numbers = re.findall(r'\d+', pager_text)
    return numbers[-1] if numbers else pager_text[-2:-1]

# Collect article links for the donation / distribution listing pages.
def get_urls(page):
    """Scrape listing page *page* and append new entries to the global urls.

    Page 0 is the bare index (``jzgst.htm``); later pages are
    ``jzgst/<n>.htm``.  Each collected entry is a dict with keys
    ``'title'``, ``'time'`` and ``'url'``; entries whose url was already
    collected are skipped.
    """
    if page == 0:
        path = subUrl + ".htm"
    else:
        path = subUrl + "/" + str(page) + ".htm"

    print(baseUrl + path)
    resp = requests.get(baseUrl + path)
    soup = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    anchors = soup.select(".list_n_news > li > a")

    for anchor in anchors:
        href = anchor.get("href")
        title = deal_value(anchor.text)
        posted = deal_value(anchor.find('span').text)
        # hrefs are relative ("../info/..."): keep only the "info..." tail
        # and re-root it on baseUrl.
        tail = re.findall(r'info\S+', href)[0]
        full_url = baseUrl + tail
        # Skip urls we have already collected (dedup by url).
        if any(known['url'] == full_url for known in urls):
            continue
        urls.append({'title': title, 'time': posted, 'url': full_url})




def deal_urls(size):
    """Trim the collected urls to *size* entries and dispatch each one.

    Titles mentioning "接收" (receipt) go to the donation handler; titles
    mentioning "发放" (distribution) or "使用" (usage) go to the grant
    handler.  A title can match both and be dispatched twice.
    """
    global urls
    urls = urls[:size]
    print(urls)
    print(len(urls))
    for entry in urls:
        title, link, when = entry['title'], entry['url'], entry['time']
        if re.search("接收", title):
            donation(title, link, when)
        if re.search("发放", title) or re.search("使用", title):
            grant(title, link, when)


# Donation handler.
def donation(title, url, time):
    """Extract donation records for *url* into the global dlist.

    Attachments (pdf/xls) are preferred; the in-page table is parsed only
    when no attachment list was found on the page.
    """
    handled = getData_files(url, time)  # downloads files and fills dlist
    if handled is True:
        return
    rows = deal_detail(url, "don")
    if rows is None:
        return
    for row in rows:
        row['时间'] = time
        dlist.append(dataAdapter.donationToJson(row))


# Grant (distribution) handler.
def grant(title, url, time):
    """Parse the grant table at *url* and append converted rows to glist."""
    rows = deal_detail(url, "grant")  # renamed from `list` (shadowed builtin)
    if rows is not None:
        for row in rows:
            row['时间'] = time
            glist.append(dataAdapter.grantToJson(row))




# Detail-page handler.
def deal_detail(url, type):
    """Download the detail page at *url* and extract its table rows.

    *type* is forwarded to get_data ("don" or "grant") to reject tables
    of the wrong category.  Returns the row list, or None.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content.decode("utf-8"), "lxml")
    return get_data(soup, type)




# Table extraction.
def get_data(bs4, type):
    """Extract tabular data from a detail page.

    Args:
        bs4: the parsed page (BeautifulSoup object).
        type: "don" for donation pages, "grant" for grant pages; used to
            reject a table that belongs to the other category.

    Returns:
        A list of {header: value} dicts, or None when the page has no
        <tbody> or the table is of the wrong category.
    """
    tables = bs4.find_all('tbody')
    if not tables:
        return None
    rows_out = []
    for table in tables:
        # The header row is normally the first <tr>; some tables put a
        # merged one-cell caption there, so fall back to the second row.
        start = 1
        header_cells = table.find("tr").find_all("td")
        if len(header_cells) < 2:
            header_cells = table.find_all("tr")[1].find_all("td")
            start = 2
        titles = []
        for cell in header_cells:
            title = deal_value(cell.text)
            # Second-pass category check from the header text: a "捐赠…"
            # header (but not "捐赠额") on a grant page, or a "发放…"
            # header on a donation page, means this is the wrong table.
            if re.match("捐赠", title) is not None and re.match("捐赠额", title) is None and type == "grant":
                return None
            if re.match("发放", title) is not None and type == "don":
                return None
            titles.append(title)
        # Data rows.  A td carrying rowspan introduces a merged first
        # column whose value also applies to the following short rows.
        merged_value = None
        for tr in table.find_all("tr")[start:]:
            first_td = tr.find('td')
            if first_td.get("rowspan") is not None:
                merged_value = first_td.text  # kept raw, as in the original
            tds = tr.find_all("td")
            values = []
            if len(tds) < len(titles):
                values.append(merged_value)
            values.extend(deal_value(td.text) for td in tds)
            # zip stops at the shorter side, so a row with MORE cells than
            # headers no longer raises IndexError (the original indexed
            # titles[k] for every value).
            rows_out.append(dict(zip(titles, values)))
    return rows_out




# Attachment download.
def save_files(bs4, refer):
    """Download the attachments linked from the page's news-content form.

    Args:
        bs4: the parsed page (BeautifulSoup object).
        refer: the page url, sent as the Referer header.

    Returns:
        The list of local file paths, or None when the matching form has
        no attachment list (callers treat None as "parse the page
        instead").  With no matching form at all, an empty list is
        returned.
    """
    file_names = []   # local paths of the downloaded files
    attachments = []
    for form in bs4.find_all('form'):
        if form.get('name') == "_newscontent_fromname":
            holder = form.find('ul')
            if holder is None:
                return
            attachments = holder.find_all('a')

    file_urls = []
    for a in attachments:
        # hrefs carry a 6-character relative prefix; strip it and re-root
        # the path on baseUrl.
        file_urls.append({
            'url': baseUrl + a.get('href')[6:],
            'filename': a.text,
        })

    header = {
        'Connection': 'close',
        'Referer': refer
    }

    # Download each attachment in 10 KB chunks.
    # NOTE(review): the destination directory is hard-coded (see the
    # warning at the top of the file) — consider making it configurable.
    for file_url in file_urls:
        res = requests.get(file_url['url'], headers=header)
        res.raise_for_status()
        target = "D:/files/" + file_url['filename']
        # `with` guarantees the handle is closed even if a write raises
        # (the original open/close pair leaked the handle on error).
        with open(target, 'wb') as fh:
            for chunk in res.iter_content(10000):
                fh.write(chunk)
        file_names.append(target)
    return file_names

# Attachment extraction.
def getData_files(url, time):
    """Download attachments from *url* and parse them into the global dlist.

    Returns False when the page has no attachment list (the caller then
    falls back to parsing the page table itself), True otherwise.
    """
    resp = requests.get(url)
    soup = BeautifulSoup(resp.content.decode("utf-8"), "lxml")
    file_names = save_files(soup, url)
    if file_names is None:
        return False
    for name in file_names:
        parsed = []
        if "pdf" in name:
            parsed = read_files.read_pdf(name, "武汉红十字基金会", time)
        elif "xls" in name:
            parsed = read_files.read_excel(name, "武汉红十字基金会", time)
        dlist.extend(parsed)
    return True




# String normalization.
def deal_value(v):
    """Normalize a scraped cell or title string.

    A string that is exactly a zero-width space becomes "无" ("none");
    NBSP, ideographic space, plain space, CR and LF are stripped out in a
    single translate pass.
    """
    if v == '\u200b':
        v = "无"
    return v.translate(str.maketrans('', '', '\xa0\u3000 \r\n'))





#完成调用爬虫及信息入库
from dg_cszg import models
import datetime

def run_spider():
    """Run the Wuhan Red Cross spider and store the results in the database.

    Crawls `page` listing pages, keeps the first `size` collected links,
    parses donation/grant data, then saves donation_info and grant_info
    rows.  Returns {"status": "ok"} on completion.
    """
                       # 1. NOTE: remember to fix subUrl for other sections
    page = 1           # 2. number of listing pages to crawl
    size = 8           # 3. keep only the first N collected links
    starttime = datetime.datetime.now()
    # c = get_pages()  # full crawl: use the real page count instead
    c = page
    for k in range(0, int(c)):
        get_urls(k)
    deal_urls(size)
    don_keys = ["jjh", "jzdw", "rq", "lx", "rkwz", "slz", "jldw", "je", "jydh"]
    gran_keys = ["jjh", "ffdw", "rq", "lx", "ffwz", "slz", "jldw", "je", "jydh"]
    count = 0
    for item in dlist:
        count = count + 1
        tmp_dict = item
        # Fill missing/None fields so the model insert never fails.
        for tag in don_keys:
            if tag not in tmp_dict.keys():
                tmp_dict[tag] = "null"
            if tmp_dict[tag] is None:
                print(tmp_dict, "||", count)
                tmp_dict[tag] = "Error"
        # Strip thousands separators (ASCII and full-width) from the amount.
        tmp_text = tmp_dict["je"]
        tmp_text = tmp_text.replace(",", "").replace("，", "")
        tmp_dict["je"] = tmp_text
        record = models.donation_info(jjh=tmp_dict["jjh"], jzdw=tmp_dict["jzdw"], rq=tmp_dict["rq"], lx=tmp_dict["lx"],
                                      rkwz=tmp_dict["rkwz"], slz=tmp_dict["slz"], jldw=tmp_dict["jldw"],
                                      je=tmp_dict["je"], jydh=tmp_dict["jydh"])
        record.save()
        print("正在入库武汉捐赠第", count, "例", "还有", len(dlist) - count, "例")
    donation_total = count  # renamed from `sum` (shadowed the builtin)
    count = 0
    for item in glist:
        tmp_dict = item
        for tag in gran_keys:
            if tag not in tmp_dict.keys():
                tmp_dict[tag] = "null"
        record = models.grant_info(jjh=tmp_dict["jjh"], ffdw=tmp_dict["ffdw"], rq=tmp_dict["rq"], lx=tmp_dict["lx"],
                                   ffwz=tmp_dict["ffwz"], slz=tmp_dict["slz"], jldw=tmp_dict["jldw"],
                                   je=tmp_dict["je"], jydh=tmp_dict["jydh"])
        record.save()
        count = count + 1
        print("正在入库武汉发放第", count, "例", "还有", len(glist) - count, "例")
    endtime = datetime.datetime.now()
    elapsed = ((endtime - starttime).seconds) / 60
    print("爬取并入库武汉信息", donation_total + count, "条,一共耗时", elapsed, "min")
    return {"status": "ok"}


# def run_spider():
#     data=models.donation_info.objects.filter(rkwz__contains="口罩")
#     result={"code":"200","ret_msg":"success","data":json.loads(json.dumps(list(data), default=lambda obj: obj.__dict__))}
#     return result