import requests
import logging
from bs4 import BeautifulSoup
from collections import OrderedDict
import re
import logging
import json
# Root-logger setup: WARNING level, timestamped "time level message" lines.
# getreq() and main() rely on this for logging.exception() output.
logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s %(levelname)s %(message)s')


def getreq(webpath, endurl, finushUrls):
    """Fetch webpath+endurl, retrying on failure.

    Up to 6 attempts are made; network errors and non-OK status codes both
    count as failures. On success the relative URL is appended to
    finushUrls (a shared "finished" pool, mutated in place) and the
    response body text is returned.

    Raises:
        RuntimeError: when every attempt fails. Callers catch RuntimeError
            specifically to skip the item and keep crawling.
    """
    attempts = 0
    while True:
        attempts += 1
        try:
            r = requests.get(webpath + endurl, timeout=5)
            if r.status_code == requests.codes.ok:
                finushUrls.append(endurl)
                return r.text
            # Non-OK response: log (was print) and fall through to retry.
            logging.warning('HTTP %s for %s', r.status_code, endurl)
        except Exception as e:
            logging.exception(e)
        if attempts > 5:
            # BUGFIX: the original used a bare `raise` here, which runs
            # outside any except block and therefore always fails with
            # "RuntimeError: No active exception to re-raise". Raise an
            # explicit RuntimeError (same type callers already catch)
            # with a useful message instead.
            raise RuntimeError(
                'giving up on %s after %d attempts' % (endurl, attempts))

def firstLeave(soup):
    """Map first-level nav labels to hrefs (ul#njtyt), preserving page order."""
    mapping = OrderedDict()
    for entry in soup.find('ul', id='njtyt').find_all('li'):
        link = entry.a
        mapping[link.string] = link['href']
    return mapping

def secondLeave(soup):
    """Return an ordered label -> href mapping for the year list (ul#njyyt)."""
    entries = soup.find('ul', id='njyyt').find_all('li')
    result = OrderedDict()
    for entry in entries:
        anchor = entry.a
        result[anchor.string] = anchor['href']
    return result

def thridLeave(soup):
    """Ordered label -> href mapping for the column ("栏目") list (ul#njcyt)."""
    columns = OrderedDict()
    items = soup.find('ul', id='njcyt').find_all('li')
    for item in items:
        columns[item.a.string] = item.a['href']
    return columns

def getarea(soup):
    """List area names from ul#njayt, skipping any '全部' ("all") entry."""
    items = soup.find('ul', id='njayt').find_all('li')
    return [item.a.string for item in items if '全部' not in item.a.string]

def getAllPage(soup, iurl):
    """Build the list of pagination URLs found on a column page.

    When the pager's last anchor reports more than 9 pages, synthesize one
    URL per page number by rewriting the i= query parameter of the pager
    template; otherwise collect the anchors' hrefs as-is.
    """
    anchors = soup.find('div', class_='hb-pg').find_all('a')
    if anchors and int(anchors[-1].string) > 9:
        print('page is too much : %s' % iurl)
        template = anchors[2].get('href')
        last = int(anchors[-1].string)
        return ['list.aspx%s' % re.sub(r'i=\w*', 'i=%s' % num, template)
                for num in range(1, last + 1)]
    return ['list.aspx' + a.get('href') for a in anchors if a.get('href')]

def getTables(soup):
    """Extract table rows from the ul.hblist-s1 listing.

    Each row is [title strings..., document id, date strings...], where the
    id comes from the d= query parameter of the entry's link. Entries whose
    link has no href are skipped.
    """
    rows = []
    for item in soup.find('ul', class_='hblist-s1').find_all('li'):
        link = item.a
        if not link.get('href'):
            continue
        doc_id = re.findall('d=(\d*)', link['href'])[0]
        spans = link.find_all('span')
        row = list(spans[0].strings)
        row.append(doc_id)
        row.extend(spans[1].strings)
        rows.append(row)
    return rows



def main():
    """Crawl tjsql.com yearbook listings and write one JSON file per yearbook.

    Hierarchy walked: report (yearbook) -> year -> column -> pagination
    page -> table rows. A RuntimeError from getreq (retries exhausted) is
    logged and the failing item is skipped; any other exception aborts the
    whole run via exit().
    """
    # Yearbooks already crawled in earlier runs / still to do; kept as
    # reference data for manually re-enabling filtering of `urls` below.
    finish = ['辽宁统计年鉴', '吉林统计年鉴', '北京统计年鉴', '广东统计年鉴', '江西统计年鉴', '内蒙古统计年鉴', '河北统计年鉴', '江苏统计年鉴', '湖北统计年鉴', '福建统计年鉴', '河南统计年鉴', '湖南统计年鉴', '山西统计年鉴', '浙江统计年鉴', '山东统计年鉴', '上海统计年鉴', '天津统计年鉴', '安徽统计年鉴', '黑龙江统计年鉴']
    todo = ['广西统计年鉴', ]
    webpath = 'http://www.tjsql.com/'
    firsturl = 'list.aspx'

    # Shared pool of successfully fetched URLs (appended to by getreq).
    finushUrls = []

    res = getreq(webpath, firsturl, finushUrls)
    soup = BeautifulSoup(res, "lxml")
    urls = firstLeave(soup)

    if '全部年鉴' in urls:
        del urls['全部年鉴']  # drop the "all yearbooks" aggregate entry

    # Process each provincial report.
    for reportName, url in urls.items():
        MYDATA = OrderedDict()
        try:
            page = getreq(webpath, url, finushUrls)
            soup = BeautifulSoup(page, "lxml")
            areas = getarea(soup)
            years = secondLeave(soup)
            MYDATA['name'] = reportName
            MYDATA['areas'] = areas
            MYDATA['years'] = OrderedDict()
            # Loop over every year of this report.
            for year, yurl in years.items():
                try:
                    ypage = getreq(webpath, yurl, finushUrls)
                    ysoup = BeautifulSoup(ypage, "lxml")
                    items = thridLeave(ysoup)

                    MYDATA['years'][year] = OrderedDict()

                    if '全部栏目' in items:
                        del items['全部栏目']  # drop the "all columns" aggregate entry

                    # Loop over every column of this year.
                    for iname, iurl in items.items():
                        try:
                            ipage = getreq(webpath, iurl, finushUrls)
                            isoup = BeautifulSoup(ipage, 'lxml')
                            pages = getAllPage(isoup, iurl)
                            pages.append(iurl)
                            myTables = []
                            for purl in pages:
                                try:
                                    ppage = getreq(webpath, purl, finushUrls)
                                    psoup = BeautifulSoup(ppage, 'lxml')
                                    tables = getTables(psoup)
                                    myTables.extend(tables)
                                except RuntimeError as e:
                                    # Retries exhausted on one page: skip it.
                                    logging.exception(e)
                                    print(purl)
                                except Exception as e:
                                    logging.exception(e)
                                    print(purl)

                            MYDATA['years'][year][iname.replace('.', ' ')] = myTables
                        except RuntimeError as e:
                            logging.exception(e)
                            # BUGFIX: was print(purl) — unbound (NameError)
                            # when the column page itself failed to download.
                            print(iurl)
                        except Exception as e:
                            logging.exception(e)
                            print(iurl)
                            exit()
                except RuntimeError as e:
                    logging.exception(e)
                    print(yurl)  # BUGFIX: was unbound purl
                except Exception as e:
                    logging.exception(e)
                    print(yurl)
                    exit()
        except RuntimeError as e:
            logging.exception(e)
            print(url)  # BUGFIX: was unbound purl
        except Exception as e:
            logging.exception(e)
            print(url)
            exit()
        jsons = json.dumps(MYDATA, ensure_ascii=False)
        # Explicit utf-8: the platform-default encoding may not be able to
        # write the Chinese report names and table titles.
        with open('../json/%s.json' % reportName, 'wt', encoding='utf-8') as f:
            f.write(jsons)

# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    main()
