# coding:utf-8
import datetime
import json
import time
# import urllib2

# from spider.common import common


def get_date_list(begin, end, fmt="%Y-%m-%d"):
    """Return the inclusive list of formatted date strings from *begin* to *end*.

    Iterates one day at a time, so both endpoints are included. If
    ``begin > end`` the result is an empty list.

    Args:
        begin: First date (``datetime.date`` or ``datetime.datetime``).
        end: Last date, inclusive.
        fmt: ``strftime`` format for each entry (default ``"%Y-%m-%d"``,
            preserving the original behavior).

    Returns:
        list[str]: One formatted string per day in ``[begin, end]``.
    """
    date_list = []
    d = begin
    delta = datetime.timedelta(days=1)
    while d <= end:
        date_list.append(d.strftime(fmt))
        d += delta
    return date_list


# def split_by_keyword(
#         Param="%E6%A1%88%E4%BB%B6%E7%B1%BB%E5%9E%8B%3A%E6%B0%91%E4%BA%8B%E6%A1%88%E4%BB%B6%2C%E8%A3%81%E5%88%A4%E5%B9%B4%E4%BB%BD%3A2012%2C%E6%B3%95%E9%99%A2%E5%B1%82%E7%BA%A7%3A%E4%B8%AD%E7%BA%A7%E6%B3%95%E9%99%A2%2C%E6%B3%95%E9%99%A2%E5%9C%B0%E5%9F%9F%3A%E5%A4%A9%E6%B4%A5%E5%B8%82"):
#     post_data = "Param=" + Param
#     result = crawl_content("http://wenshu.court.gov.cn/List/TreeContent", {},
#                            post_data)  # http://wenshu.court.gov.cn/List/TreeContent
#     try:
#         obj = json.loads(result)
#     except Exception:
#         # common.log_json_load_error(Param)
#         return []
#     childs = obj[0]['Child']
#     resultList = []
#     for child in childs:
#         resultList.append(child["Key"])
#     return resultList


# def split_by_area(Param):
#     post_data = "Param=" + Param
#     result = crawl_content("http://wenshu.court.gov.cn/List/TreeContent", {}, post_data)
#     try:
#         obj = json.loads(result)
#     except Exception:
#         print ("get areas fail")
#         return ["北京市",
#                 "天津市",
#                 "河北省",
#                 "山西省",
#                 "内蒙古自治区",
#                 "辽宁省",
#                 "吉林省",
#                 "黑龙江省",
#                 "上海市",
#                 "江苏省",
#                 "浙江省",
#                 "安徽省",
#                 "福建省",
#                 "江西省",
#                 "山东省",
#                 "河南省",
#                 "湖北省",
#                 "湖南省",
#                 "广东省",
#                 "广西壮族自治区",
#                 "海南省",
#                 "重庆市",
#                 "四川省",
#                 "贵州省",
#                 "云南省",
#                 "西藏自治区",
#                 "陕西省",
#                 "甘肃省",
#                 "青海省",
#                 "宁夏回族自治区",
#                 "新疆维吾尔自治区",
#                 "新疆维吾尔自治区高级人民法院生产建设兵团分院"]
#     childs = obj[3]['Child']
#     resultList = []
#     for child in childs:
#         if child["Key"] != "" and child["Key"] != u"最高人民法院":
#             resultList.append(child["Key"])
#
#     return resultList


# def split_by_area(Param):
#     return [
#         "山东省",
#         "河南省",
#         "湖北省",
#         "湖南省",
#         "广东省",
#         "广西壮族自治区",
#         "海南省",
#         "重庆市",
#         "四川省",
#         "贵州省",
#         "云南省",
#         "西藏自治区",
#         "陕西省",
#         "甘肃省",
#         "青海省",
#         "宁夏回族自治区",
#         "新疆维吾尔自治区",
#         "新疆维吾尔自治区高级人民法院生产建设兵团分院"]#"北京市",
#         # "天津市",
#         # "河北省",
#         # "山西省",
#         # "内蒙古自治区",
#         # "辽宁省",
#         # "吉林省",
#         # "黑龙江省",
#         # "上海市",
#         # "江苏省",
#         # "浙江省",
#         # "安徽省",
#         # "福建省",
#         # "江西省",







# def split_by_middle_court(parval, Param, header={}):
#     header = {
#         'Host': 'wenshu.court.gov.cn',
#
#         'Origin': 'http: // wenshu.court.gov.cn',
#         'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
#         'Accept': '*/*',
#         'Accept-Language': 'zh,en-US;q=0.7,en;q=0.3',
#         'Accept-Encoding': 'gzip, deflate',
#         'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
#         'X-Requested-With': 'XMLHttpRequest',
#         'Connection': 'keep-alive',
#         'Pragma': 'no-cache',
#         'Cache-Control': 'no-cache',
#     }
#
#     post_data = "Param=" + Param + "&" + "parval=" + parval
#     result = crawl_content("http://wenshu.court.gov.cn/List/CourtTreeContent", header, post_data)
#     try:
#         obj = json.loads(result)
#     except Exception:
#         common.log_json_load_error(Param)
#         return []
#     resultList = []
#     childs = obj[0]['Child']
#     for child in childs:
#         if child["Key"] != "":
#             resultList.append(child["Key"])
#
#     return resultList

#
# def split_by_low_court(parval,
#                        Param, header={}):
#     post_data = "Param=" + Param + "&" + "parval=" + parval
#     result = crawl_content("http://wenshu.court.gov.cn/List/CourtTreeContent", header, post_data)
#     try:
#         obj = json.loads(result)
#     except Exception:
#         common.log_json_load_error(Param)
#         return []
#     childs = obj[0]['Child']
#     resultList = []
#     for child in childs:
#         if child["Key"] != "":
#             resultList.append(child["Key"])
#     return resultList


# def crawl_content(url, header, data):
#     request = urllib2.Request(url, data, header)
#     time.sleep(5)
#     try:
#         response = urllib2.urlopen(request, timeout=10)
#     except Exception, e:
#         print e.message
#         common.log_http_error(data)
#         return "NONE"
#
#     try:
#         result = eval(response.read())
#         while result == 'remind':
#             common.parse_captcha()
#             response = urllib2.urlopen(request, timeout=10)
#             result = eval(response.read())
#     except Exception:
#         with open("eval_error.log", "a") as fp:
#             fp.write(urllib2.unquote(data))
#             fp.write("\n")
#             result = "NONE"
#     # is_ok=common.check_result(result,data)
#     # while is_ok!=0:
#     #    if is_ok==1:
#     #        result = get_page(url, header, data)
#     #        is_ok = common.check_result(result, data)
#     #    elif is_ok==2:
#     #        result="NONE"
#     #        break
#     return result
