from requests_html import HTMLSession
from vis_sys.settings import FW_PASSWD,FW_USERID
from datetime import datetime, timedelta
import time
import hashlib
import pandas as pd

# Shared HTTP sessions, one per backend host so login cookies don't collide.
session = HTMLSession()      # foshanplus.hzfanews.com (main console)
session_dp = HTMLSession()   # dp.hzfanews.com (data big-screen)
session_gov = HTMLSession()  # gov.hzfanews.com (leadership portal)

def login_page():
    """Authenticate against the foshanplus back-end console.

    Fetches the login page, copies the ASP.NET hidden state fields from the
    rendered form into the payload, then posts the credentials on the shared
    ``session``.
    """
    login_url = 'http://foshanplus.hzfanews.com/v2/default.aspx'
    payload = {
        "tbUserID": FW_USERID,
        "tbPwd": FW_PASSWD,
        "btnLogin": "登录",
    }

    page = session.get(login_url)
    login_form = page.html.find('form', first=True)
    # The form action starts with "./"; strip that and rebase it onto the
    # UserLogin path.
    post_url = 'http://foshanplus.hzfanews.com/UserManage/UserLogin/' + login_form.attrs['action'][2:]

    # ASP.NET hidden state fields required for the POST to be accepted.
    for field in ('__VIEWSTATE', '__VIEWSTATEGENERATOR'):
        payload[field] = login_form.find('#' + field, first=True).attrs['value']

    session.post(post_url, data=payload)


def login_dp_page():
    """Log in to the Fanews data big-screen (dp.hzfanews.com) on ``session_dp``.

    Scrapes the ASP.NET hidden state fields out of the login page with
    template searches, then posts the credentials.

    NOTE(review): credentials and an IP are hardcoded here instead of coming
    from ``vis_sys.settings`` like the other logins — consider moving them
    into settings.
    """
    url ='http://dp.hzfanews.com/UserManage/UserLogin/Login.aspx?url=http://dp.hzfanews.com/prd/foshan/'
    data = {
        "tbUserID": "3231138@qq.com",
        "tbPwd": "18170947173",
        "btnLogin": "登 录",
        "cbAutoLogin": "on",
        "txtIPIsInner": "120.84.12.15",

    }

    res = session_dp.get(url)
    # Extract the ASP.NET hidden fields by matching the raw HTML templates.
    data['__VIEWSTATE'] = res.html.search('id="__VIEWSTATE" value="{}"')[0]

    data['__VIEWSTATEGENERATOR'] = res.html.search('id="__VIEWSTATEGENERATOR" value="{}"')[0]

    data['__EVENTVALIDATION'] = res.html.search('id="__EVENTVALIDATION" value="{}"')[0]

    # The form action starts with "./"; strip it and rebase onto the login path.
    post_url = res.html.search('action="{}"')[0]

    url = 'http://dp.hzfanews.com/UserManage/UserLogin/'+post_url[2:]
    session_dp.post(url,data=data)


def login_gov():
    """Log in to the gov.hzfanews.com portal on ``session_gov``.

    Copies the ASP.NET hidden state fields and the login button caption from
    the rendered form before posting the credentials.
    """
    index_url = 'http://gov.hzfanews.com/index.aspx'
    payload = {
        "tbUserID": FW_USERID,
        "tbPwd": FW_PASSWD,
        "cbAutoLogin": "on",
        "txtIPIsInner": "121.9.15.138",
    }

    page = session_gov.get(index_url)
    login_form = page.html.find('form', first=True)
    post_url = 'http://gov.hzfanews.com/UserManage/UserLogin/' + login_form.attrs['action']

    # Hidden ASP.NET state fields plus the submit button's caption, all taken
    # verbatim from the form so the POST is accepted.
    selectors = {
        '__VIEWSTATE': '#__VIEWSTATE',
        '__VIEWSTATEGENERATOR': '#__VIEWSTATEGENERATOR',
        '__EVENTVALIDATION': '#__EVENTVALIDATION',
        'btnLogin': '#btnLogin',
    }
    for key, selector in selectors.items():
        payload[key] = login_form.find(selector, first=True).attrs['value']

    session_gov.post(post_url, data=payload)


def get_data(num=6,start=0, orderby='samecount+desc'):
    """Fetch the "media hotspot" article list.

    orderby: ``increment+desc``, ``samecount`` or ``createindex_time+desc``.
    Re-authenticates and retries when the API reports a failed session.
    """
    url = (
        'http://foshanplus.hzfanews.com/api/hotfind.ashx'
        '?whatDo=getMediaFocusArticle'
        '&start={start}&limit={num}&orderby={orderby}'
    ).format(num=num, start=start, orderby=orderby)
    payload = session.get(url).json()
    if payload['Succeed']:
        return payload
    # Session expired: log in again and repeat the same query.
    print('login first')
    login_page()
    return get_data(num=num, start=start, orderby=orderby)


def get_toutiao_data(type_id, num=6, start=0):
    """Headline feed for one channel.

    Known channel ids: WeChat (14162), web media (12839), party media
    (15628), Foshan news (6934).  Re-authenticates and retries when the API
    reports a failed session.
    """
    url = (
        'http://foshanplus.hzfanews.com/api/ArticleList.ashx'
        '?whatDo=getArticleList&id={id}&start={start}'
        '&limit={num}&orderby=updatetime+desc'
    ).format(id=type_id, num=num, start=start)
    payload = session.get(url).json()
    if payload['Succeed']:
        return payload
    print('login first')
    login_page()
    return get_toutiao_data(type_id=type_id, num=num, start=start)


def get_user_hot(from_date, to_date, num=6, start=0, orderby='degree+desc'):
    """Fanews user-hotspot articles between two dates.

    orderby: ``degree+desc`` or ``updatetime+desc``.
    Re-authenticates and retries when the API reports a failed session.
    """
    url = (
        'http://foshanplus.hzfanews.com/api/hotfind.ashx'
        '?whatDo=getHotFindArticle&start={start}&limit={num}'
        '&startDate={startDate}&endDate={endDate}&orderby={orderby}'
    ).format(num=num, start=start, startDate=from_date, endDate=to_date,
             orderby=orderby)
    payload = session.get(url).json()
    if payload['Succeed']:
        return payload
    print('login first')
    login_page()
    return get_user_hot(from_date, to_date, num=num, start=start, orderby=orderby)


def get_statistics():
    """Home-page statistics, grouped per source plus a grand total.

    Keys: newsTotalCount (total articles), mediaCount (total channels),
    newsUpdateCount (articles updated today).  Each key maps to a list of
    ``{'x': name, 'y': count}`` points; ``result['total']`` carries the sum
    for each key.  Re-authenticates and retries on a failed session.
    """
    url = 'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetHomeStatistics'
    payload = session.get(url).json()
    if not payload.get('Succeed', True):
        print('login first')
        login_page()
        return get_statistics()

    result = {'total': {}}
    for key in ('newsTotalCount', 'newsUpdateCount', 'mediaCount'):
        # Reshape each entry into chart-ready x/y points; unnamed entries
        # are skipped.
        points = [
            {'x': item['name'], 'y': item['count']}
            for item in payload[key]
            if item['name'] != ""
        ]
        result[key] = points
        result['total'][key] = sum(point['y'] for point in points)

    return result


def get_newsBM(ids="1122,1094,1566,1101,1127"):
    """Fetch newspaper front-cover data for the given paper ids.

    Known ids: 1253,2132,1008,1088,1122,1094,1566,1101,1127 plus 1400
    (Foshan Daily), which is always appended last.  Re-authenticates on the
    dp session and retries when the response is not JSON or reports failure.
    """
    # Foshan Daily (1400) goes last.  Build the full list in a separate
    # variable: the original mutated ``ids`` before recursing, so every
    # login-retry appended another ",1400" to the query.
    full_ids = ids + ",1400"
    url = ('http://dp.hzfanews.com/api/screen_api.ashx'
           '?whatDo=GetPaperWithNewBM&isMainCity=0&paperDate=0'
           '&paperid={ids}').format(ids=full_ids)
    res = session_dp.get(url)
    try:
        res = res.json()
        has_login = res.get('Succeed')
    except ValueError:  # non-JSON body (login page) — was a bare except
        has_login = False

    if has_login:
        return res
    print('login first')
    login_dp_page()
    return get_newsBM(ids=ids)



def get_foshanNews(num=5, start=0):
    """Foshan local articles (channel 95478), including diffusion data.

    Re-authenticates and retries when the API reports a failed session.
    """
    url = (
        'http://foshanplus.hzfanews.com/Search/GetList'
        '?whatDo=getMediaFocusArticle&id=95478&start={start}'
        '&limit={limit}&orderby=updatetime+desc'
    ).format(limit=num, start=start)
    payload = session.get(url).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return get_foshanNews(num=num, start=start)


def get_diffusion_info(articleid, updatetime):
    """Diffusion chart data for one Foshan-area article.

    Re-authenticates and retries when the API reports a failed session.
    """
    url = (
        'http://foshanplus.hzfanews.com/api/hotfind.ashx'
        '?whatDo=getMediaFocusChartData'
        '&articleid={articleid}&updatetime={updatetime}'
    ).format(articleid=articleid, updatetime=updatetime)
    payload = session.get(url).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return get_diffusion_info(articleid, updatetime)



def get_emotion_info(channel_id, emotion_id, start_date, end_date):
    """Summed quantized emotion data for a channel, grouped by paper date."""
    url = (
        "http://dp.hzfanews.com/api/screen_api.ashx?whatDo=QuantizedDataSum"
        "&channelid={channel_id}&startDate={start_date}&endDate={end_date}"
        "&emotion={emotion_id}&group=paperdate"
    ).format(channel_id=channel_id, emotion_id=emotion_id,
             start_date=start_date, end_date=end_date)
    return session_dp.get(url).json()



def hot_word(count="10", exclude=None):
    """Query the hot-word API for Fanews column 112761.

    Request parameters (all strings):
        appid / applicationid: fixed account key (9a57578ead534096) and id (342).
        time: current 10-digit unix timestamp.
        type: calculation type, fixed to "3".
        columnid: Fanews column id, 112761.
        count: number of words to return.
        exclude: comma-separated words to exclude ("_" means none).
        sign: uppercase MD5 over all parameter VALUES concatenated in
            alphabetical key order, followed by the client secret.

    Returns the decoded JSON: ``code`` (200 = ok), ``msg`` and ``data``
    (word -> frequency); other fields can be ignored.
    """
    base_url = "http://testapi.cnfanews.com:8000/Response/Screen/Column.ashx?"
    secret = "d5278bfb4f354564acbcfef9239f4c2c"

    params = {
        'appid': "9a57578ead534096",
        'applicationid': "342",
        'columnid': "112761",
        "count": count,
        "exclude": exclude if exclude else "_",
        'time': str(int(time.time())),
        'type': "3",
    }
    # The API requires the sign to cover the values in alphabetical key
    # order; sort explicitly instead of relying on dict insertion order
    # happening to be alphabetical.
    to_sign = "".join(params[k] for k in sorted(params)) + secret
    params['sign'] = hashlib.md5(to_sign.encode('UTF-8')).hexdigest().upper()

    # Build the query string without the stray trailing "&" the original
    # left on (consistent with foshan_news_api).
    url = base_url + "&".join(k + "=" + v for k, v in params.items())
    res = session.get(url)
    return res.json()


def foshan_news_api(count='20', start='0'):
    """Pull Foshan news articles from the DataAPI feed.

    :param count: max number of articles per call (string).
    :param start: "0" on the first call; afterwards the largest ``id``
        returned by the previous call.
    :return: decoded JSON response.

    ``sign`` is the uppercase MD5 over all parameter values concatenated in
    alphabetical key order, followed by the client secret.
    """
    base_url = "http://testapi.cnfanews.com:8000/DataAPI.aspx?"
    secret = "0ad6c8098e59461e96da91db828f5177"

    params = {
        'appid': '72d0cfa26f0f422a',
        'applicationid': '367',
        'id': start,    # 0 first, then the max id of the previous batch
        'maxnum': count,
        'time': str(int(time.time())),
        'type': "1",    # 1: json, 2: xml
    }

    # Sort explicitly: the sign must cover values in alphabetical key order,
    # not whatever order the dict happens to preserve.
    to_sign = "".join(params[k] for k in sorted(params)) + secret
    params['sign'] = hashlib.md5(to_sign.encode('UTF-8')).hexdigest().upper()

    url = base_url + "&".join(k + "=" + v for k, v in params.items())
    res = session.get(url)
    return res.json()

def get_newsroute(id, startDate, endDate, start=0, limit=500):
    """Reprint/diffusion data for one of the Foshan papers.

    Results come back sorted by reprinting-media count, but the API only
    sorts within the ``limit`` newest articles rather than returning a true
    top-N — hence the large default limit.  Re-authenticates and retries
    when the API reports a failed session.

    :return: decoded JSON response.
    """
    pages = {
        '53215': '佛山日报',
        '76149': '珠江时报',
        '76150': '珠江商报',
        '144486': '（F）佛山plus'
    }
    # Unknown ids fall back to Foshan Daily.
    page_id = id if id in pages else '53215'

    url = 'https://foshanplus.hzfanews.com/Command/dataJson.aspx'
    form = {
        'whatDo': 'GetSearchSimilarityListForReprintedNewVision',
        'id': page_id,
        'name': pages[page_id],
        'start': start,
        'limit': limit,
        'startDate': startDate,
        'endDate': endDate,
        'cityID': 0,
        'source': 0,
        'markinfo': 1,
        'listKeyType': 50,
    }
    payload = session.post(url, data=form).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return get_newsroute(id=id, startDate=startDate, endDate=endDate,
                         start=start, limit=limit)



def advancedsearch(allKey, anyKey, startDate, endDate, exceptKey="", start=0, limit=20, selType="",
                   searchRange=1, searchResultType=0, searchResultSlop=2, original=2, emotion=-1, haveImage=-1):
    """Advanced article search.

    :param allKey: keywords that must ALL appear
    :param anyKey: keywords of which ANY may appear
    :param exceptKey: keywords that must NOT appear
    :param start: paging offset, default 0
    :param limit: number of articles to return
    :param startDate: search window start
    :param endDate: search window end
    :param selType: source — website, news (paper), weibo, weixin,
        webbbs (forum), webapp; "" means all
    :param searchRange: where keywords match — 1: full text, 2: title
    :param searchResultType: match mode — 0: exact, 100: fuzzy
    :param searchResultSlop: match slop count, default 2
    :param original: 2: all, 0: reprint, 1: original
    :param emotion: -1: all, 1: negative, 51: positive
    :param haveImage: -1: all, 0: no image, 1: with image
    :return: decoded JSON; re-authenticates and retries on a failed session
    """

    data = {
        "whatDo": "AdvancedSearch",
        "allKey": allKey,
        "anyKey": anyKey,
        "exceptKey": exceptKey,
        "start": start,
        "limit": limit,
        "startDate": startDate,
        "endDate": endDate,
        "cheekedType": 0,
        "paperIDs": "",
        "paperIDsUN": "",
        "markinfo": 1,
        "retType": "2,5",
        "orderby": "updatetime desc",
        "searchRange": searchRange,  # 1: full text, 2: title
        "selType": selType,
        # BUG FIX: this field previously sent searchResultSlop, so the
        # searchResultType argument (exact/fuzzy) was silently ignored.
        "searchResultType": searchResultType,  # 0: exact, 100: fuzzy
        "searchResultSlop": searchResultSlop,  # match slop count
        "mediaNameList": "",
        "original": original,  # 2: all, 0: reprint, 1: original
        "emotion": emotion,    # 1: negative, 51: positive
        "emotionValueP": 50,
        "emotionValueN": 50,
        "haveImage": haveImage,  # -1: all, 0: no image, 1: with image
        "cityId": "",
        "wordsNumRange": ",",
        "originSource": "",
        "exceptMedia": "",
    }

    url = 'http://foshanplus.hzfanews.com/api/ArticleList.ashx'

    res = session.post(url, data=data)
    res = res.json()
    has_login = res.get('Succeed', True)
    if has_login:
        return res
    else:
        print('login first')
        login_page()
        res = advancedsearch(allKey, anyKey, startDate, endDate, exceptKey=exceptKey, start=start, limit=limit,
                             selType=selType, searchRange=searchRange, searchResultType=searchResultType,
                             searchResultSlop=searchResultSlop, original=original, emotion=emotion, haveImage=haveImage)
        return res



def person_news(personid, count=20, curPage=1):
    """Article list for a tracked leader (gov portal).

    :param personid: leader id
    :param count: page size
    :param curPage: 1-based page number
    :return: decoded JSON; re-authenticates and retries on a failed session
    """
    url = 'http://gov.hzfanews.com/Controller/json.ashx?action=getArticleByPersonId'
    data = {
        "personId": personid,
        "pageSize": count,
        "curPage": curPage,
        "sortName": "",
        "sortOrder": "",
    }
    # BUG FIX: this gov endpoint was queried on the foshanplus session and
    # retried via login_page(); use the gov session/login like every other
    # gov.hzfanews.com function.  The retry also called person_news() with
    # no arguments, which raised TypeError.
    res = session_gov.post(url, data=data)
    res = res.json()
    if res.get('Succeed', True):
        return res
    print('login first')
    login_gov()
    return person_news(personid, count=count, curPage=curPage)


def person_news_keyword(personid, start, end, count=10):
    """Word-frequency statistics over a leader's articles.

    :param personid: leader id
    :param start: start date
    :param end: end date
    :param count: number of words to return
    """
    url = (
        'http://gov.hzfanews.com/Controller/json.ashx?action=getWord'
        '&personid={personid}&count={count}&startdate={startdate}&enddate={enddate}'
    ).format(personid=personid, count=count, startdate=start, enddate=end)

    # NOTE(review): hits the gov host on the foshanplus ``session`` —
    # confirm whether ``session_gov`` was intended.
    return session.get(url).json()


def person_actical(item_id, person):
    """Fetch one leader-related article page and extract its parts.

    :param item_id: article id
    :param person: leader's name (the page bolds it inside the article body)
    :return: dict with ``updatetime``, ``media``, ``title`` and ``content``
    """
    url = "http://gov.hzfanews.com/admin/ArticleInfo.aspx?id={id}&eventname={event}&personname={person}".format(
        id=item_id,
        event="最新消息",
        person=person,
    )
    page = session_gov.get(url)
    wrap = page.html.find('#wrap', first=True)

    # Media name is the first <span>; strip the slashes around it.
    media_name = wrap.find('span', first=True).text.replace('/', '').strip()

    return {
        'updatetime': wrap.find('#updatetime', first=True).text,
        'media': media_name,
        'title': wrap.find('.title', first=True).text,
        'content': wrap.find('.endText', first=True).text,
    }



def get_person_id():
    """Return the account's list of followed leaders.

    Re-authenticates on the gov session and retries when the response is not
    JSON (e.g. a login page) or reports a failed session.
    """
    url = "http://gov.hzfanews.com/Controller/json.ashx?action=getLatestArticle&pageSize=10&curPage=1&sort=duty"
    res = session_gov.get(url)
    try:
        payload = res.json()
        has_login = payload.get('Succeed', True)
    except ValueError:  # non-JSON body — was a bare except: that also hid
        has_login = False  # KeyboardInterrupt/SystemExit

    if has_login:
        return payload
    print('login first')
    login_gov()
    return get_person_id()



def get_foshan_qu_news(id, startDate, endDate, start=0, limit=20, selType="", keyWords="", classify=""):
    """Positive/negative news per Foshan district.

    :param id: channel id, one of
        {'禅城正面':'86324','禅城负面':'86323','南海正面':'86325','南海负面':'86326',
         '顺德正面':'86327','顺德负面':'86328','高明正面':'86329','高明负面':'86330',
         '三水正面':'86331','三水负面':'86332'}
    :param startDate: search window start
    :param endDate: search window end
    :param start: paging offset
    :param limit: number of articles
    :param selType: source — website, news (paper), weibo, weixin,
        webbbs (forum), webapp; "" means all
    :param keyWords: keyword filter
    :param classify: category, e.g. society / sports / economy
    :return: decoded JSON; re-authenticates and retries on a failed session
    """
    url = (
        "http://foshanplus.hzfanews.com/api/ArticleList.ashx?whatDo=getArticleList"
        "&id={id}&start={start}&limit={limit}&startDate={startDate}&endDate={endDate}"
        "&orderby=updatetime+desc&markinfo=1&haveImage=-1&emotion=-1"
        "&selType={selType}&keyWords={keywords}&channelType=&classify={classify}"
    ).format(id=id, start=start, limit=limit, startDate=startDate,
             endDate=endDate, selType=selType, keywords=keyWords,
             classify=classify)
    payload = session.get(url).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return get_foshan_qu_news(id=id, startDate=startDate, endDate=endDate, classify=classify,
                              start=start, limit=limit, selType=selType, keyWords=keyWords)


def get_eventlist():
    """List the account's special-topic events.

    Re-authenticates and retries when the API reports a failed session.
    """
    url = 'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetEventList'
    payload = session.get(url).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return get_eventlist()

def get_event_detail(paramets=None, url_ids=None):
    """Fetch detail data for a special-topic event from a set of endpoints.

    Each endpoint template takes ``format()`` placeholders such as ``id``
    (channel id), ``today`` (date) and — for the last template —
    ``province``.

    :param paramets: dict of ``format()`` kwargs for the URL templates.
        When falsy, the raw template list is returned without fetching.
    :param url_ids: list of indexes into the template list to fetch;
        anything else (including an empty list) selects all templates.
    :return: dict mapping each template's name to its decoded response, or
        an ``{'Succeed': False, 'Msg': ...}`` entry on error.
    """
    urls = [
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?channelid={id}&whatDo=GetEventStatCount','name':"文章媒体总数"},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetOriginalStat&channelid={id}','name':"原创转载总数"},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetTop10Media&channelid={id}','name':"媒体发稿量排序"},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetFirstArticle&channelid={id}&searchType=websiteall','name':"网站首发报道"},
        {'url':'http://foshanplus.hzfanews.com/api/ArticleList.ashx?whatDo=getArticleList&id={id}&start=0&'+
        'limit=1&startDate={today}&endDate=1900-01-01&selType=&markinfo=1&simply=1','name':'网站最新报道'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetFirstArticle&channelid={id}&'+
        'searchType=weixin%2Cweixingov%2Cweixinentprise%2Cweixinprofession%2Cweixinpersonalmedia','name':'微信首发报道'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetTop10Source&channelid={id}','name':'媒体传播力统计'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetWordStatByCount&channelid={id}','name':'词频统计'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?channelId={id}&whatDo=GetEventTraceChartData&statType=13','name':'发展趋势'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?channelId={id}&whatDo=getMediaFocusChartData','name':'热度趋势'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetEmotionCurve&channelid={id}&statType=13','name':'情感趋势'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?channelid={id}&whatDo=GetCityArticleCount','name':'地域咨询'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetEventPhotos&channelid={id}', 'name':'新闻相关图片'},
        {'url':'http://foshanplus.hzfanews.com/Command/dataJson.aspx?whatDo=GetArticleByProvince&province={province}&'+
        'channelid={id}&startDate={today}','name':'地域相关新闻列表'}
    ]

    if not paramets:
        return urls

    if not url_ids or not isinstance(url_ids, list):
        # BUG FIX: the default was range(0, len(urls) - 1), which silently
        # skipped the last endpoint.
        url_ids = range(len(urls))

    result = {}
    for u_id in url_ids:
        if u_id >= len(urls):
            continue
        name = urls[u_id]['name']
        try:
            url = urls[u_id]['url'].format(**paramets)
            res = session.get(url).json()
            # Some endpoints return a bare list; only dicts carry 'Succeed'.
            has_login = res.get('Succeed', True) if isinstance(res, dict) else True
            if not has_login:
                login_page()
                res = session.get(url).json()
            result[name] = res
        except Exception as e:
            # BUG FIX: was {'Successd': 'False'} — misspelled key and a
            # truthy string value.
            result[name] = {'Succeed': False, 'Msg': str(e)}

    return result


# 凡闻city代码
def get_city_code():
    """Build Fanews city/province code lookup tables.

    Logs in, downloads the province list (city.js), then queries each
    province's sub-cities one by one (best effort: a failing province is
    printed and skipped).

    :return: ``{'citys': {city_code: city_name},
                'provinces': {province_code: province_name}}``
    """
    login_page()
    city_code = {}
    province_code = {}
    url = 'http://foshanplus.hzfanews.com/v2/js/city.js'
    provinces = session.get(url).json()

    for province in provinces['city']:
        province_code[province['provinces']] = province['name']
        # Some provinces (municipalities) carry their own cityID directly.
        code = province.get('cityID')
        if code:
            city_code[str(code)] = province['name']

        url = ('http://foshanplus.hzfanews.com/Command/dataJson.aspx'
               '?whatDo=getCity&province={}').format(province['name'])
        try:
            sub_cities = session.get(url).json()
            for sub_city in sub_cities:
                city_code[str(sub_city['cityID'])] = sub_city['city']
        except Exception:  # was a bare except; keep the best-effort skip
            print(province['name'], url)

    return {'citys': city_code, 'provinces': province_code}

import pickle

def save_pkl(file_name, data):
    """Pickle ``data`` to ``<file_name>.pkl`` in the working directory."""
    path = '{}.pkl'.format(file_name)
    with open(path, 'wb') as fh:
        pickle.dump(data, fh)

def load_pkl(file_name):
    """Unpickle and return the contents of ``file_name`` (full path, with
    suffix)."""
    with open(file_name, 'rb') as fh:
        return pickle.load(fh)


def source_static(page_id, startDate, endDate):
    """Reprint-media statistics for a channel between two dates.

    Re-authenticates and retries when the API reports a failed session.
    """
    url = (f'http://foshanplus.hzfanews.com/command/datajson.aspx'
           f'?whatDo=GetReprintedMediaStatList&channelid={page_id}'
           f'&keyWord=&start={startDate}&end={endDate}'
           f'&orderby=&isexclude=1&count=5000')
    payload = session.get(url).json()
    if payload.get('Succeed', True):
        return payload
    print('login first')
    login_page()
    return source_static(page_id=page_id, startDate=startDate, endDate=endDate)

if __name__ == '__main__':
    # Ad-hoc manual test scratchpad: the commented-out blocks below are
    # earlier experiments kept for reference; only the last two lines run.
    #login_gov()
    # res = advancedsearch("佛山", "南海", "2019-08-16", "2019-08-19", selType="webapp")
    # # qudict = {'禅城正面':'86324','禅城负面':'86323','南海正面':'86325','南海负面':'86326','顺德正面':'86327',
    # # '顺德负面':'86328','高明正面':'86329','高明负面':'86330','三水正面':'86331','三水负面':'86332'}
    # # qudict = {'南海正面': '86325', '三水正面': '86331', '微信': "14162"}
    # # for key, id in qudict.items():
    # #     print('-------%s : %s---------' % (key, id))
    # #     res = get_foshan_qu_news(id=id, startDate='2019-08-19', endDate='2019-08-19', limit=10,  classify='经济')
    #
    # print('总新闻数量:', res['obj']['total'])
    # for item in res['obj']['rows']:
    #     print(item['class1'])
    #print(hot_word())
    # begin_id = '370441759'
    # for i in range(0, 100):
    #     res = foshan_news_api(count='100', start=begin_id)
    #     count_1 = 0
    #     count_2 = 0
    #     for data in res['data']:
    #         if data['category_id'] == '122576':
    #             count_1 += 1
    #         if data['category_id'] == '121293':
    #             count_2 += 1
    #
    #     print('reslut:{},start: {} -->> 122576: {} , 121293: {}'.format(res['result'], begin_id, count_1, count_2))
    #
    #     begin_id = res['data'][-1]['id']
    # save_pkl('city_code', get_city_code())
    #res = load_pkl('city_code.pkl')
    #print(res)
    #print(get_person_id()['focusPerson'])
    #print(foshan_news_api(start='384325527'))
    #print(get_event_detail({'id':'127521','today':'2019-10-15'}, url_ids=[0,1,4]))

    #wenming_static()
    #print(source_static())
    res = foshan_news_api()
    print(res)
