# -*- coding: utf-8 -*-

import json
import os
import re

import scrapy
from bs4 import BeautifulSoup
from scrapy.http import Request
from scrapy.selector import Selector

from finance.items import StockItem

from scrapy import optional_features
optional_features.remove('boto')

class StockSpider(scrapy.Spider):
	name = "stock_"
	allowed_demains = ["sina.com.cn"]
	#start_urls = ["http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodes"]

	def start_requests(self):
		url = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodes"
		return [scrapy.FormRequest(url,callback=self.parse)]

	def parse(self,response):
		vec = []
		html_doc = response.body
		ss = html_doc.decode('GBK').encode('utf-8')

		ss = re.sub(r"(,?)(\w+?)\s+?:", r"\1'\2' :", ss);
		ss = ss.replace("'", "\"");

		data = json.loads(ss)
		tree = data[1]
		for id in range(len(tree)):
			ret_tree = self.tree_parse(tree[id])
			vec += ret_tree
		for id in range(len(vec)):
			yield Request(url = vec[id][0],callback=vec[id][1])


	def tree_parse(self,obj_json):
		ret = []
		name = obj_json[0]

		pattern = re.compile("<font.*>(.*)<\/font>")
		res = pattern.search(name)
		if res is not None and len(res.groups()) > 0:
			name = res.groups()[0]

		dir1 = "./res/"+name + "/"
		if not os.path.exists(dir1):
			os.makedirs(dir1)

		ff1 = open(dir1 + name +".txt",'w')
		ff1.write(json.dumps(obj_json[1]))
		ff1.close()
		if not isinstance(obj_json[1],list):
			return ret

		if name == u"美国股市":
			pass
		elif name == u"期货":
			#ret += self.future_parse(obj_json[1])
			pass
		else:
			for id1 in range(len(obj_json[1])):
				ret += self.node_parse(obj_json[1][id1],dir1)
		
		return ret
	
	def node_parse(self,obj_json,dir1):
		ret = []

		pattern = re.compile("<font.*>(.*)<\/font>")

		name = obj_json[0]
		res = pattern.search(name)
		if res is not None and len(res.groups()) > 0:
			name = res.groups()[0]
		dir2 = dir1 + name

		ff = open(dir2+".txt",'w')
		ff.write(json.dumps(obj_json[1]))
		ff.close()

		if not isinstance(obj_json[1],list):
			return ret

		dir2 = dir2 + "/"

		if not os.path.exists(dir2):
			os.makedirs(dir2)

		if name == u'分类':
			ret_fenlei = self.fenlei_parse(obj_json[1])
			ret += ret_fenlei		

		'''
		for id3 in range(len(obj_json[1])):
			self.link_parse(obj_json[1][id3],dir2)
		'''
		return ret

	def link_parse(self,obj_json,dir1):
		pattern = re.compile("<font.*>(.*)<\/font>")

		name = obj_json[0]
		res = pattern.search(name)
		if res is not None and len(res.groups()) > 0:
			name = res.groups()[0]
		dir2 = dir1 + name

		ff = open(dir2+".txt",'w')
		ff.write(json.dumps(obj_json))
		ff.close()

	def fenlei_parse(self,obj_json):
		ret = []
		url_head = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount?node="

		for id3 in range(len(obj_json)):
			code = obj_json[id3][2]
			if len(obj_json[id3]) > 4:
				code = re.sub(r'\{node: "(.*)"\}', r'\1',obj_json[id3][4])

			link = url_head + code
			ret.append([link,self.count_parse])
		return ret
	def future_parse(self,obj_json):
		url_head = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQFuturesCount?node="
		head = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?"
		ret = []
		codes = []
		for id in range(len(obj_json)):
			link = ""
			if isinstance(obj_json[id][1],list):
				for ii in range(len(obj_json[id][1])):
					codes.append(obj_json[id][1][ii][2])
			else:
				codes.append(obj_json[id][2])
	
		for code in codes:
			link = "%spage=1&num=80&sort=symbol&asc=1&node=%s&symbol=&_s_r_a=init"%(head,code)
			ret.append([link,self.futu_page_parse])

		return ret


	def count_parse(self,response):
		head = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?"
		html_doc = response.body

		name = response.url.split('?')[1].split('=')[1]
		html_doc = re.sub(r'\(new String\("(\d*)"\)\)', r'\1', html_doc).replace(" ","")
		if len(html_doc) == 0 or html_doc == 'null':
			return	

		count = int(html_doc)
		pages = count/80
		if count%80 > 0:
			pages += 1

		for page in range(pages):
			link = "%spage=%d&num=80&sort=symbol&asc=1&node=%s&symbol=&_s_r_a=init"%(head,page + 1,name)
			yield Request(url = link,callback=self.page_parse)

	def futu_count_parse(self,response):
		head = "http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQFuturesData?"
		link = "page=1&num=40&sort=symbol&asc=1&node=zly_qh&_s_r_a=init"


	def page_parse(self,response):
		html_doc = response.body
		ss = html_doc.decode('GBK').encode('utf-8')

		params = response.url.split('?')[1].split('&')
		code = params[4].split("=")[1]
		page = params[0].split("=")[1]
		name = "./res/%s_%s.txt"%(code,page)

		ss = re.sub(r"(,?)(\w+?)\s*?:", r"\1'\2':", ss)
		ss = ss.replace("'", "\"")
		ss = re.sub(r'""(\d*)":"(\d*)":(\d*)"', r'"\1:\2:\3"', ss)		


		data = json.loads(ss)		
		if data is None:
			return None

		item = StockItem()
		item['item_type'] = 'stock_classify'
		item['item_code'] = code
		item['data'] = []
		for row in data:
			item['data'].append(row)
		return item

		ff = open(name,'w')
		ff.write(json.dumps(data))
		ff.close()
		if not os.path.exists("./res/his_stock/"):
			os.makedirs("./res/his_stock/")
		'''
		codes = [row['code'] for row in data]
		for code_ in codes:
			head = "http://vip.stock.finance.sina.com.cn/corp/go.php/vMS_MarketHistory/stockid/%s.phtml"%(code_)
			ff = lambda response: self.his_stock_year_parse(code_,response)
			yield Request(url = head,callback = ff)
		'''

	def futu_page_parse(self,response):
		return None
		html_doc = response.body
		ss = html_doc.decode('GBK').encode('utf-8')

		params = response.url.split('?')[1].split('&')
		code = params[4].split("=")[1]
		page = params[0].split("=")[1]
		name = "./res/%s_%s.txt"%(code,page)
		print name

		ss = re.sub(r"(,?)(\w+?)\s*?:", r"\1'\2':", ss)
		ss = ss.replace("'", "\"")
		ss = re.sub(r'""(\d*)":"(\d*)":(\d*)"', r'"\1:\2:\3"', ss)		

		data = json.loads(ss)

		ff = open(name,'w')
		ff.write(json.dumps(data))
		ff.close()

		if data is None:
			return None

		codes = [row['symbol'] for row in data]
		fstock = open('./futu_code.txt','a')
		fstock.write(code+":::"+",".join(codes)+'\n')
		fstock.close()

	def his_stock_year_parse(self,code_,response):
		code = code_#response.meta["code"]
		head = "http://vip.stock.finance.sina.com.cn/corp/go.php/vMS_MarketHistory/stockid/%s.phtml"%(code)
		years = response.xpath('//select[@name="year"]/option/text()').extract()
		print response.meta["code"],"----",years
		
		for year in years:
			for season in range(4):
				link = "%s?year=%s&jidu=%d"%(self.head,year,season+1)
				req = Request(url = link,callback = self.his_stock_day_parse)
				req.meta["code"] = code
				yield req

	def his_stock_day_parse(self,response):
		code = response.meta["code"]
		#print "code=",code,"?",response.url.split("?")[1]
		html_doc = response.body
		soup = BeautifulSoup(html_doc,'lxml')
		tagHold = soup.find('table',{'id':'FundHoldSharesTable'})
		if tagHold is None:
			return None

		ff = open("./res/his_stock/" + code+".txt",'a')

		strRe = re.compile(r"<(?!((/?s?li)|(/?s?ul)|(/?s?td)|(/?s?img)|(/?s?br)|(/?s?span)|(/?s?b)))[^>]+>")
		index = 0
		tagRows = tagHold.findAll('tr')
		for row in tagRows:
			tagItems = row.findAll('td')
			if tagItems is None or len(tagItems) != 7:
				continue
			index = index + 1
			if index == 1:
				continue
			strRow = str(row)
			strRow = strRe.sub('',strRow)
			sel = Selector(text = strRow)
			res = sel.xpath('//td/text()').extract()
			res = [item.replace('\t','').replace('\n','').replace('\r','') for item in res]
			ff.write(";".join(res) + "\n")
		ff.close()