import json
import re
import time
import warnings
from multiprocessing import Pool

import pymysql
import requests
from bs4 import BeautifulSoup
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class JuChao:
	"""Scraper for listed-company data on cninfo.com.cn (巨潮资讯网).

	Starting from the company-list index page, it visits each company with a
	headless Chrome browser, clicks through the profile / dividend /
	management / shareholders tabs, parses the iframe content each tab loads,
	and persists everything into a local MySQL database ``JuChao``.

	Tables written: ``brief`` (company profile), ``fhpg`` (dividends and
	rights issues), ``management`` (executives), ``shareholders`` (top-ten
	shareholders).
	"""
	def __init__(self):
		# Desktop browser UA so the site serves its normal HTML pages.
		self.headers = {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16"}
		self.baseurl = 'http://www.cninfo.com.cn/cninfo-new/information/companylist'
		# Keyword arguments: PyMySQL 1.0 removed positional connect() args.
		# NOTE(review): credentials are hard-coded — consider moving to env vars.
		self.db = pymysql.connect(host='localhost', user='root', password='123456', charset='utf8')
		self.cursor = self.db.cursor()

	def _fetch(self, url):
		"""GET *url* with the scraper headers and return the gb2312-decoded text."""
		r = requests.get(url, headers=self.headers)
		r.encoding = 'gb2312'
		return r.text

	def _iframe_content(self, html):
		"""Pull the ``layout1`` iframe src out of *html* and download that page.

		Raises IndexError when the iframe is absent (page failed to load).
		"""
		parseHtml = etree.HTML(html)
		src = parseHtml.xpath('//div[@class="layout1"]/iframe/@src')
		return self._fetch('http://www.cninfo.com.cn/information/' + str(src[0]))

	def _new_driver(self):
		"""Return a fresh headless Chrome WebDriver instance."""
		chrome_options = Options()
		chrome_options.add_argument('--headless')
		chrome_options.add_argument('--disable-gpu')
		return webdriver.Chrome(options=chrome_options)

	def _ensure_table(self, c_tab):
		"""Create the JuChao database (and the table in *c_tab*) if missing.

		MySQL emits a Warning (not an error) for ``if not exists`` on an
		existing object; warnings are escalated and swallowed so only real
		errors propagate — same behaviour as the original inline code.
		"""
		warnings.filterwarnings("error")
		try:
			self.cursor.execute('create database if not exists JuChao;')
		except Warning:
			pass
		self.cursor.execute('use JuChao;')
		try:
			self.cursor.execute(c_tab)
		except Warning:
			pass

	# Fetch the company index and crawl every listed company.
	def getUrl(self, url):
		"""Parse the company-list page at *url* and crawl each company link."""
		html = self._fetch(url)
		parseHtml = etree.HTML(html)
		url_list = parseHtml.xpath('//*[@id="con-a-1"]/ul/li/a/@href')
		for u in url_list:
			self.getPage(u)

	# Drive the browser through one company's tab pages.
	def getPage(self, u):
		"""Open company page *u*, click each data tab, and dispatch the
		resulting HTML to the matching parser. The browser is always
		released, even if a tab fails."""
		driver = self._new_driver()
		try:
			driver.get(u)

			# Company profile tab ("brief").
			driver.find_element(By.ID, 'brief').click()
			time.sleep(1)  # let the iframe load before reading page_source
			content = self._iframe_content(driver.page_source)
			p = re.compile('<td class="zx_data2">(.*?)</td>', re.S)
			r_list = p.findall(content)
			self.saveTomysql(r_list)

			# Dividend / rights-issue tab.
			driver.find_element(By.ID, 'fhpg').click()
			time.sleep(1)
			self.fhpgUrl(driver.page_source, r_list)

			# Management (executives) tab.
			driver.find_element(By.ID, 'management').click()
			time.sleep(1)
			self.managementUrl(driver.page_source, r_list)

			# Top-ten shareholders tab.
			driver.find_element(By.ID, 'shareholders').click()
			time.sleep(1)
			self.shareholderUrl(driver.page_source, r_list)
		finally:
			driver.quit()  # don't leak the browser process on errors

	# Parse and store dividend / rights-issue history.
	def fhpgUrl(self, f_html, r_list):
		"""Parse the dividend iframe out of *f_html* and insert rows of five
		fields each (year, plan, record date, ex-rights date, payout date)
		tagged with the company abbreviation ``r_list[3]``."""
		content = self._iframe_content(f_html)
		p = re.compile('<td class=".*?">(.*?)</td>', re.S)
		f_list = p.findall(content)

		self._ensure_table('create table if not exists fhpg(id int primary key auto_increment,\
				c_jianchen varchar(10),\
				fhnd varchar(10),\
				plan varchar(100),\
				record_date varchar(10),\
				base_date varchar(10),\
				up_date varchar(10))charset=utf8;')

		# Parameterized query: scraped values may contain quotes; string
		# interpolation would break the SQL (and allow injection).
		f_insert = 'insert into fhpg(c_jianchen,fhnd,plan,record_date,base_date,up_date) values(%s,%s,%s,%s,%s,%s)'
		# Clamp the bound so a trailing partial group can't index past the end.
		for n in range(0, len(f_list) - 4, 5):
			self.cursor.execute(f_insert, (r_list[3].strip(), f_list[n].strip(), f_list[n + 1].strip(),
					f_list[n + 2].strip(), f_list[n + 3].strip(), f_list[n + 4].strip()))
		self.db.commit()

	# Parse and store executive roster.
	def managementUrl(self, m_html, r_list):
		"""Parse the management iframe out of *m_html* and insert rows of
		five fields each (name, job, birth date, sex, education), starting
		at offset 4 to skip the header cells."""
		content = self._iframe_content(m_html)
		p = re.compile('<td class=".*?">(.*?)</td>', re.S)
		m_list = p.findall(content)

		self._ensure_table('create table if not exists management(\
					id int primary key auto_increment,\
					c_jianchen varchar(10),\
					u_name varchar(50),\
					job varchar(20),\
					b_date varchar(10),\
					sex varchar(10),\
					education varchar(10))charset=utf8;')

		m_insert = 'insert into management(c_jianchen,u_name,job,b_date,sex,education) values(%s,%s,%s,%s,%s,%s)'
		# Offset 4 skips the table header; bound clamped for partial groups.
		for n in range(4, len(m_list) - 4, 5):
			self.cursor.execute(m_insert, (r_list[3].strip(), m_list[n].strip(), m_list[n + 1].strip(),
					m_list[n + 2].strip(), m_list[n + 3].strip(), m_list[n + 4].strip()))
		self.db.commit()

	# Parse and store the top-ten shareholders.
	def shareholderUrl(self, s_html, r_list):
		"""Parse the shareholders page referenced from *s_html*.

		This tab nests a second iframe behind another click, so a second
		headless browser visit is needed before the data page can be
		fetched. Rows have four fields each (name, shares, percentage,
		share nature).
		"""
		parseHtml = etree.HTML(s_html)
		src = parseHtml.xpath('//div[@class="layout1"]/iframe/@src')
		url = 'http://www.cninfo.com.cn/information/' + str(src[0])

		driver = self._new_driver()
		try:
			driver.get(url)
			driver.find_element(By.ID, 'shareholders').click()
			time.sleep(1)
			html = driver.page_source
		finally:
			driver.quit()

		parseHtml = etree.HTML(html)
		src = parseHtml.xpath('//div[@class="clear"]/iframe/@src')
		content = self._fetch('http://www.cninfo.com.cn' + str(src[0]))

		p = re.compile('<td class="zx_data3">(.*?)</td>', re.S)
		s_list1 = p.findall(content)

		self._ensure_table('create table if not exists shareholders(\
					id int primary key auto_increment,\
					c_jianchen varchar(10),\
					gd_name varchar(100),\
					gf_num varchar(50),\
					insto varchar(10),\
					nature_of_shares varchar(20))charset=utf8;')

		# Parameterized values replace the original's ad-hoc escape_string.
		s_insert = 'insert into shareholders(c_jianchen,gd_name,gf_num,insto,nature_of_shares) values(%s,%s,%s,%s,%s)'
		for n in range(0, len(s_list1) - 3, 4):
			self.cursor.execute(s_insert, (r_list[3].strip(), s_list1[n].strip(), s_list1[n + 1].strip(),
					s_list1[n + 2].strip(), s_list1[n + 3].strip()))
		self.db.commit()
		print("'%s'下载完成" % r_list[3].strip())

	# Persist the company profile.
	def saveTomysql(self, r_list):
		"""Insert one ``brief`` row: company name, abbreviation, legal
		representative, board secretary and registered capital, taken from
		fixed positions in *r_list*."""
		self._ensure_table('create table if not exists brief(id int primary key auto_increment,\
				c_name varchar(50),\
				c_jianchen varchar(10),\
				f_person varchar(10),\
				c_mishu varchar(10),\
				zc_money varchar(20))charset=utf8;')

		b_insert = 'insert into brief(c_name,c_jianchen,f_person,c_mishu,zc_money) values(%s,%s,%s,%s,%s)'
		self.cursor.execute(b_insert, (r_list[0].strip(), r_list[3].strip(), r_list[4].strip(),
				r_list[5].strip(), r_list[6].strip()))
		self.db.commit()

	def workOn(self):
		"""Entry point: crawl every company on the index page."""
		self.getUrl(self.baseurl)

if __name__ == '__main__':
	# Run the crawler when executed as a script.
	crawler = JuChao()
	crawler.workOn()