import math
import re
import scrapy
from ..items import *
from copy import deepcopy
import json

'''
Incremental-update parsing: scrape detail pages for newly added list rows.
'''


# 具体页面解析
# Detail-page spider: picks rows from `data_list` that have no matching
# record in `data_info` yet, fetches each row's detail page, parses the
# page sections into a dict and yields it (JSON-serialized) as a DataInfo
# item, then tags the source row as updated.
class SseCompanyListSpider(scrapy.Spider):
	name = 'data_info_update'
	# allowed_domains = ['xx.com']
	next_flag = True
	url = 'http://query.sse.com.cn/commonQuery.do'

	def get_company_code(self):
		"""Return pending rows (id, Guide_to, url) from data_list.

		A row is pending when its id has no entry in data_info yet.

		NOTE(review): relies on ``self.cursor`` being attached elsewhere
		(not visible in this file); rows are accessed by key below, so a
		dict-style cursor is assumed — confirm against the DB setup.
		"""
		sql_update = '''
            select id, Guide_to, url
            from data_list where id not in 
            (select index_id from data_info)
            '''
		self.cursor.execute(sql_update)
		return self.cursor.fetchall()

	def start_requests(self):
		"""Schedule one detail-page request per pending data_list row."""
		all_list = self.get_company_code()
		# Fix: use the spider logger instead of bare print() so the count
		# shows up in scrapy's log output.
		self.logger.info('pending rows: %d', len(all_list))
		for company_code in all_list:
			yield scrapy.Request(
				url=company_code['Guide_to'],
				callback=self.parse,
				meta={
					'index_id': deepcopy(company_code['id']),
					'ljbl': deepcopy(company_code['url'])
				},
				headers={
					'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
					'Host': 'www.gdzwfw.gov.cn',
					'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
				}
			)

	def clean_data(self, list_1):
		"""Strip newlines/tabs from each entry and drop empty results.

		Fix: some call sites pass ``extract_first()`` output, i.e. a str
		or None — the old version raised TypeError on None and iterated a
		str character by character. None now yields [], and a str is
		wrapped so that ``''.join(clean_data(s))`` returns the same value
		as before (s minus newlines/tabs).
		"""
		if list_1 is None:
			return []
		if isinstance(list_1, str):
			list_1 = [list_1]
		return [i.replace('\n', '').replace('\t', '')
				for i in list_1
				if i.replace('\n', '').replace('\t', '')]

	def json_data(self, dict_1):
		"""Serialize *dict_1* to JSON without ASCII-escaping CJK text."""
		return json.dumps(dict_1, ensure_ascii=False)

	def clean_strip(self, str1):
		"""Strip surrounding whitespace; pass None/'' through unchanged."""
		if str1:
			str1 = str1.strip()
		return str1

	def _join_label_pairs(self, response, li_xpath):
		"""Join '<label><text>\\n' pairs from <li><p>label</p><p>text</p>…>.

		Shared by the consultation (zxfs) and complaint (jdtsfs) sections,
		which used two identical inline loops before.
		"""
		result = ''
		for li in response.xpath(li_xpath):
			key = li.xpath('./p[1]/text()').extract_first()
			value = ''.join(li.xpath('./p[2]//text()').extract())
			if key and value:
				result += key + value + '\n'
		return result

	# Detail-page parser: each page section is scraped into data_dict,
	# which is then serialized to JSON for storage.
	def parse(self, response, **kwargs):
		try:
			index_id = response.meta['index_id']
			ljbl = response.meta['ljbl']
			data_dict = {
				# "handle online" link
				"ljbl": "",
				# basic information
				'jbxx': '',
				# acceptance criteria
				'slbz': '',
				# handling procedure
				'bllc': '',
				# application materials (+ intermediary services)
				'sqcl': {"sqcl": [], "zjfw": ''},
				# consultation / supervision contacts
				'zxjd': {"zxfs": "", "jdtsfs": ''},
				# counter (in-person) handling
				'ckbl': '',
				# fee items
				'sfxmxx': '',
				# legal basis (setting / implementation)
				'fnyj': {"sdyj": "", "ssyj": ""},
				# applicant rights and obligations
				'qlyyw': {'sqryfxyyxql': "", "sqryfnxyxyw": ""},
				# legal remedies (review / litigation)
				'fnjj': {"xzfy": "", "xzss": ""},
			}
			data_dict['ljbl'] = ljbl

			# Basic information: each row carries two key/value column
			# pairs (th[1]/td[1] and th[2]/td[2]).
			jbxx_str = ''
			tr_list = response.xpath('//h2[text()="基本信息"]/following-sibling::table/tbody/tr')
			for tr in tr_list:
				for col in (1, 2):
					key = self.clean_strip(tr.xpath(f'./th[{col}]/text()').extract_first())
					value = self.clean_data(tr.xpath(f'./td[{col}]//text()').extract())
					value = value[0] if value else ''
					if key and value:
						jbxx_str += key + ":" + value + "\n"
			data_dict['jbxx'] = jbxx_str

			# Acceptance criteria: scope table + condition paragraphs.
			item_slbz = {}
			tr_list = response.xpath('//h3[text()="受理范围"]/following-sibling::table/tbody/tr')
			To_accept_the_range_str = ""
			for tr in tr_list:
				key = tr.xpath('./th[1]/text()').extract_first()
				value = tr.xpath('./td[1]/p/text()').extract_first()
				if key and value:
					To_accept_the_range_str += key + ":" + value + "\n"
			item_slbz['slfw'] = To_accept_the_range_str
			sltj = self.clean_data(response.xpath(
				'//h3[text()="受理条件"]/following-sibling::p//text()').extract())
			item_slbz['sltj'] = ''.join(sltj)
			data_dict['slbz'] = item_slbz

			# Handling procedure: one text list per step <div>.
			bllc_list = []
			for div in response.xpath('//*[@id="tab1"]/div/div'):
				content = self.clean_data(div.xpath('./div//text()').extract())
				if content:
					bllc_list.append(content)
			data_dict['bllc'] = bllc_list

			# Application materials table (clean_data now tolerates the
			# str/None that extract_first() returns for td[1]/td[2]).
			sqcl_list = []
			for tr in response.xpath('//*[@id="cl-list"]/tr'):
				sqcl_list.append({
					'xh': ''.join(self.clean_data(tr.xpath('./td[1]/text()').extract_first())),
					'clmc': ''.join(self.clean_data(tr.xpath('./td[2]/text()').extract_first())),
					'clxs': ''.join(self.clean_data(tr.xpath('./td[3]//text()').extract())),
					'clyq': ''.join(self.clean_data(tr.xpath('./td[4]//text()').extract())),
					'clxz': ''.join(self.clean_data(tr.xpath('./td[5]//text()').extract())),
					'qtxx': ''.join(self.clean_data(tr.xpath('./td[6]//text()').extract())),
				})
			data_dict['sqcl']['sqcl'] = sqcl_list

			# Intermediary services: row 0 holds <th> headers, row 1 the
			# matching <td> values.
			tr_list = response.xpath('//div[@id="rereq"]/h3[text()="中介服务"]/following-sibling::table[1]/tbody/tr')
			index_len = 0
			for tr in tr_list:
				th_len = tr.xpath('./th')
				if th_len:
					# column count taken from the header row
					index_len = len(th_len)
					break
			# Fix: guard the value-row lookup — a header-only table used
			# to raise IndexError on tr_list[1] and abort the whole item
			# via the broad except below.
			if index_len and len(tr_list) > 1:
				zjfw_str = ''
				for i in range(1, index_len + 1):
					key = tr_list[0].xpath(f'./th[{i}]/div/text()').extract_first()
					value = tr_list[1].xpath(f'./td[{i}]/text()').extract_first()
					if key and value:
						zjfw_str += key + ":" + value + '\n'
				data_dict['sqcl']['zjfw'] = zjfw_str

			# Consultation and complaint contact lists share one layout.
			data_dict['zxjd']['zxfs'] = self._join_label_pairs(
				response, '//h3[text()="咨询方式"]/following-sibling::ul[1]/li')
			data_dict['zxjd']['jdtsfs'] = self._join_label_pairs(
				response, '//h3[text()="监督投诉方式"]/following-sibling::ul[1]/li')

			# Counter handling.
			ckbl_data = self.clean_data(response.xpath(
				'//h2[text()="窗口办理"]/following-sibling::div[1]//text()').extract())
			data_dict['ckbl'] = "\n".join(ckbl_data)

			# Fee items.
			sfxmxx = self.clean_data(response.xpath(
				'//h2[text()="收费项目信息"]/following-sibling::*[1]//text()').extract())
			data_dict['sfxmxx'] = "\n".join(sfxmxx)

			# Setting basis: a two-part header sets a prefix that is
			# prepended to subsequent single-part headers.
			sdyj_str = ''
			prefix = ''
			for tr in response.xpath('//div[@id="tab3"]//tr'):
				key = tr.xpath('./th//text()').extract()
				if len(key) == 2:
					prefix = key[0]
					key = ''.join(key)
				elif len(key) == 1:
					key = prefix + key[0]
				else:
					# Fix: with >2 header fragments `key` stayed a list and
					# `str + list` raised TypeError, aborting the item;
					# join the fragments instead ('' when empty).
					key = ''.join(key)
				value = self.clean_data(tr.xpath('./td[1]//text()').extract())
				value = '\n'.join(value)
				if key and value:
					sdyj_str += key + ':' + value + '\n'
			data_dict['fnyj']['sdyj'] = sdyj_str
			# Implementation basis — fix: extract_first() may return None,
			# which used to be stored as null in the JSON; default to ''.
			data_dict['fnyj']['ssyj'] = response.xpath('//div[@id="tab4"]//p/text()').extract_first() or ''

			# Applicant rights and obligations.
			sqrql = self.clean_data(response.xpath(
				'//h3[text()="申请人依法享有以下权利"]/following-sibling::p[1]/text()').extract())
			sqryw = self.clean_data(response.xpath(
				'//h3[text()="申请人依法履行以下义务"]/following-sibling::p[1]/text()').extract())
			data_dict['qlyyw']['sqryfxyyxql'] = ''.join(sqrql)
			data_dict['qlyyw']['sqryfnxyxyw'] = ''.join(sqryw)

			# Legal remedies: administrative review / litigation.
			xzfy_list = self.clean_data(response.xpath(
				'//h3[text()="行政复议"]/following-sibling::p[1]//text()').extract())
			xzss_list = self.clean_data(response.xpath(
				'//h3[text()="行政诉讼"]/following-sibling::p[1]//text()').extract())
			data_dict['fnjj']['xzfy'] = '\n'.join(xzfy_list)
			data_dict['fnjj']['xzss'] = '\n'.join(xzss_list)

			item = DataInfo()
			item['index_id'] = index_id
			item['json_data'] = json.dumps(data_dict, ensure_ascii=False)
			yield item

			self.change_status("data_list", item['index_id'], "增量更新")

		except Exception:
			# Fix: was print(e) — log with traceback so failures are
			# visible in the crawl log. Still swallowed deliberately so
			# one bad page does not stop the crawl.
			self.logger.exception('failed to parse %s', response.url)

	def change_status(self, base, index_id, tag):
		"""Set Update_tag = *tag* on row *index_id* of table *base*.

		Fix: tag and id are now bound as query parameters instead of
		being formatted into the SQL string (injection/quoting bug; the
		old query also produced invalid SQL for non-numeric ids). The
		table name cannot be parameterized, so it is still interpolated —
		callers only pass the literal "data_list".

		NOTE(review): %s placeholders assume a MySQL-style driver
		(pymysql/MySQLdb paramstyle 'format') — confirm against the
		connection setup, which is not visible in this file.
		"""
		sql = "update {} set Update_tag = %s where id = %s".format(base)
		self.cursor.execute(sql, (tag, index_id))
		self.conn.commit()

