#!/usr/bin/env python
# -*- coding:utf-8 -*-

import requests
from lxml import etree
from bs4 import BeautifulSoup
import json


class JiJinSpider(object):
	"""Scrape index-fund details from eastmoney.com.

	``start()`` crawls one fund's detail page, its fund-company page and
	its purchase-info page, merging everything into ``self.fund``.
	``start2()`` lists the codes of all funds matching the search API query.
	"""

	def __init__(self):
		# Search API: funds matching the keyword 中证500 (CSI 500 index).
		self.api = 'http://fundsuggest.eastmoney.com/FundSearch/api/FundSearchPageAPI.ashx?m=1&key=中证500&pageindex=0&pagesize=100'
		# Detail page of one concrete fund (510510).
		self.url = 'http://fund.eastmoney.com/510510.html'
		self.headers = {
			"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
		}
		self.fund = None      # dict of merged fund attributes, filled by the parse_* methods
		self.company = None   # reserved, currently unused
		self.funds = []       # reserved for batch crawling, currently unused

	def get_data(self, url, timeout=10):
		"""Fetch *url* and return the response body decoded as UTF-8.

		*timeout* (seconds) is new but defaulted, so existing callers are
		unaffected; without it a stalled connection would hang forever.
		"""
		data = requests.get(url, headers=self.headers, timeout=timeout).content.decode('utf-8')
		return data

	def save_page(self, page, file_name):
		"""Write the already-decoded page text to *file_name* (debug helper)."""
		with open(file_name, 'w', encoding='utf-8') as f:
			f.write(page)

	def parse_jijin_data(self, data):
		"""Parse the fund detail page and store the result in ``self.fund``."""
		bs = BeautifulSoup(data, 'lxml')
		# Cells look like "基金类型：指数型". partition() keeps any extra
		# '：' inside the value intact (split('：')[1] truncated it) and
		# tolerates a cell with no separator at all.
		fund = {}
		for info in bs.select('.infoOfFund table td'):
			key, _, value = info.get_text().partition('：')
			fund[key] = value
		if fund.get('跟踪标的', 0):
			# Index funds carry an extra "tracking target | tracking error"
			# bar; iterate the parts so an unexpected count cannot crash.
			gz = bs.select_one('.specialData').get_text()
			for part in gz.split('|'):
				key, _, value = part.partition('：')
				fund[key] = value
		fund_name = bs.select_one(".fundDetail-tit div").get_text()
		# Title is "name(code)"; rsplit survives a '(' inside the name itself.
		fund['name'], fund['index'] = fund_name.rsplit('(', 1)

		html = etree.HTML(data)
		fund['company_url'] = html.xpath('//div[@class="fundDetail-footer"]/ul//a[text()="基金公司"]/@href')[0]
		fund['saleinfo_url'] = html.xpath('//div[@class="fundDetail-footer"]/ul//a[text()="购买信息"]/@href')[0]

		self.fund = fund

	def parse_company_data(self, data):
		"""Parse the fund-company page and merge its facts into ``self.fund``."""
		bs = BeautifulSoup(data, 'lxml')
		info = {}
		# This page uses an ASCII ':' separator; partition() keeps values
		# that themselves contain ':' (e.g. URLs) from being truncated.
		for li in bs.select('.fund-info li'):
			key, _, value = li.get_text().partition(':')
			info[key.strip()] = value.strip()
		self.fund.update(info)

	def parse_sale_data(self, data):
		"""Parse the purchase-info page and merge its facts into ``self.fund``."""
		html = etree.HTML(data)
		cells = html.xpath('//div[@class="txt_in"]/div[4]//td/text()')
		# Cells alternate key, value, key, value...; the len(cells) - 1
		# bound ignores a dangling key instead of raising IndexError.
		info = {cells[i]: cells[i + 1] for i in range(0, len(cells) - 1, 2)}
		self.fund.update(info)

	def start(self):
		"""Crawl one fund end-to-end and print the merged record."""
		data = self.get_data(self.url)
		self.parse_jijin_data(data)
		data = self.get_data(self.fund['company_url'])
		self.parse_company_data(data)
		data = self.get_data(self.fund['saleinfo_url'])
		self.parse_sale_data(data)
		print(self.fund)

	def start2(self):
		"""Print the fund code of every search-API result."""
		datas = self.get_data(self.api)
		# Parse the JSON response properly; never eval() text fetched from
		# the network — it executes arbitrary Python expressions.
		datas = json.loads(datas)
		for data in datas['Datas']:
			print(data['CODE'])


if __name__ == "__main__":
	# Only crawl when run as a script; importing this module must not
	# trigger network requests.
	spider = JiJinSpider()
	spider.start2()
