# # coding:utf8
#
# import scrapy
# import datetime
# from BashouScrapy.wenshu import param_split
# import urllib
#
# # 从距今天几天前开始抓取，1代表昨天，2代表距离今天2天，以此类推
# DAYS_BETWEEN_BEGIN_AND_TODAY = 10
# to_be_insert_data = {
#     'Param': "",
#     'PubDate': "",
#     'Index': str(1),
#     'Page': '5',
#     'Order': urllib.quote('法院层级'),
#     'Direction': 'asc'
# }
# MID_YEAR = 2014
#
# class WenshuParamSpider(scrapy.Spider):
#     name = "wenshu_param"
#     today = datetime.datetime.now()
#     begin_day = today - datetime.timedelta(DAYS_BETWEEN_BEGIN_AND_TODAY)
#     yesterday = today - datetime.timedelta(1)
#
#     def start_requests(self):
#         for upload_date in param_split.get_date_list(self.begin_day, self.yesterday):
#             to_be_insert_data["PubDate"] = upload_date
#             param_upload_date = urllib.quote('上传日期:') + upload_date + "+TO+" + upload_date
#             # 1996年到MID_YEAR年的直接抓取，理论上很少
#             self.crawl_old_instrument(param_upload_date, MID_YEAR)
#             upload_year = int(upload_date[0:4])
#             if MID_YEAR > upload_year:
#                 continue
#             # 按照刑事民事等抓取
#             crawl_by_case_type(param_upload_date, upload_year)
#
#     def crawl_old_instrument(self, param_upload_date, end_year=2014):
#         for year in range(1996, end_year):
#             param_judgement_year = urllib.quote('裁判年份:') + str(year)
#             add_param([param_upload_date, param_judgement_year], True)
