from __future__ import print_function, division

import ast
import re

import pandas as pd
import requests
from selenium import webdriver

# Request headers mimicking a Chrome session on zq.win007.com so the site's
# Ajax schedule endpoint answers as it would for its own pages.
headers = {
	"Accept": "*/*",
	"Accept-Encoding": "gzip, deflate",
	"Accept-Language": "zh-CN,zh;q=0.9",
	"Connection": "keep-alive",
	"Cookie": "UM_distinctid=16390364185422-05e6b3f430b394-e323462-144000-16390364186d59; CNZZDATA1261430177=1274474508-1527129096-%7C1527129096",
	"Host": "zq.win007.com",
	"Referer": "http://zq.win007.com/cn/team/CTeamSche/735.html",
	"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36",
}



class Spider(object):
	"""Selenium-based scraper for 2018 World Cup team match data on zq.win007.com."""

	def __init__(self):
		# Chrome driver with a generous implicit wait so slowly rendered
		# tables are still found.
		self.driver = webdriver.Chrome()
		self.driver.implicitly_wait(30)
		self.verificationErrors = []
		self.accept_next_alert = True

	def get_all_team_data(self):
		"""Scrape every team's schedule and write data_2018WorldCup.csv.

		Step 1: collect the 32 team ids from the cup main page.
		Step 2: scrape each team's schedule page and concatenate the results.
		"""
		self.get_team_ids()
		data = []
		# Iterate .values to get [team_id, team_name] rows; iterating the
		# DataFrame object itself would only yield the column labels.
		for i, (team_id, team_name) in enumerate(self.team_list.values):
			if i == 1:
				break  # debug limiter: stop after the first team
			print(i, team_id, team_name)
			# Collect inside the loop so every visited team contributes rows
			# (the original appended only after the loop had finished).
			df = self.get_team_data(team_id, team_name)
			data.append(df)
		output = pd.concat(data)
		output.reset_index(drop=True, inplace=True)
		print(output)
		output.to_csv('data_2018WorldCup.csv', index=False, encoding='utf-8')
		# quit() ends the whole chromedriver session; close() would leave the
		# driver process running.
		self.driver.quit()

	def get_team_ids(self):
		"""Collect (team_id, team_name) for all teams from the cup page.

		Stores them on ``self.team_list`` (columns: team_id, team_name) and
		dumps them to an Excel file.
		"""
		main_url = 'http://zq.win007.com/cn/CupMatch/75.html'
		self.driver.get(main_url)
		teams = self.driver.find_elements_by_xpath(
			".//td[@style='background-color:#fff;text-align:left;']")
		data = []
		for team in teams:
			link = team.find_element_by_xpath('.//a')
			# The numeric team id is the last path component of the href,
			# e.g. .../735.html -> 735.
			team_id = int(link.get_attribute('href').split('/')[-1].split('.')[0])
			team_name = link.text
			print(team_id, team_name)
			data.append([team_id, team_name])
		# Column labels now match the appended row order (they were swapped),
		# and the DataFrame/Excel dump happens once instead of per iteration.
		self.team_list = pd.DataFrame(data, columns=['team_id', 'team_name'])
		self.team_list.to_excel('国家队ID.xlsx', index=False)

	def get_team_data(self, team_id, team_name):
		"""Scrape one team's match table. TODO: pagination is not implemented."""
		url = 'http://zq.win007.com/cn/team/CTeamSche/%d.html' % team_id
		self.driver.get(url)
		table = self.driver.find_element_by_xpath(
			".//div[@id='Tech_schedule' and @class='data']")
		matches = table.find_elements_by_xpath('.//tr')
		print(len(matches))
		data = []
		for i, match in enumerate(matches):
			if i == 0:
				# First row holds the column headers.
				ths = match.find_elements_by_xpath('.//th')
				print(ths[0].text, ths[1].text, ths[2].text, ths[3].text, ths[4].text)
				continue
			try:
				info = match.find_elements_by_xpath('.//td')
				# .text is already str on Python 3; the original
				# str(x.encode('utf-8')) produced "b'...'" garbage.
				cup = info[0].text
				match_time = info[1].text
				home_team = info[2].text
				fts = info[3].text
				fs_A, fs_B = int(fts.split('-')[0]), int(fts.split('-')[1])
				away_team = info[4].text
				print(cup, match_time, home_team, away_team, fs_A, fs_B)
				data.append([cup, match_time, home_team, away_team, fs_A, fs_B, team_name])
			except (IndexError, ValueError):
				# Trailing non-match rows (e.g. the pager) fail parsing; stop
				# there instead of swallowing every possible exception.
				break
		df = pd.DataFrame(data, columns=['赛事', '时间', '主队', '客队', '主队进球', '客队进球', '国家队名'])
		print(df)
		return df

def my_world():
	driver = webdriver.Chrome()
	main_url = 'http://zq.win007.com/cn/CupMatch/75.html'
	driver.get(main_url)
	teams = driver.find_elements_by_xpath(".//td[@style='background-color:#fff;text-align:left;']")

	all_data = []

	i = 1
	for team in teams:

		team_id = int(team.find_element_by_xpath('.//a').get_attribute('href').split('/')[-1].split('.')[0])
		team_name = team.find_element_by_xpath('.//a').text

		res_1 = requests.get(f"http://zq.win007.com/cn/team/TeamScheAjax.aspx?TeamID={team_id}&pageNo=1&flesh=0.9791464780566963",
							 headers=headers).text
		res_2 = requests.get(f"http://zq.win007.com/cn/team/TeamScheAjax.aspx?TeamID={team_id}&pageNo=2&flesh=0.9791464780566963",
							 headers=headers).text

		ret_1 = re.search(r'teamPageData.*',res_1).group()[15:-1]
		ret_2 = re.search(r'teamPageData.*',res_2).group()[15:-1]

		# ret_1 = res_1[44:-2]
		# ret_2 = res_2[44:-2]
		match_list_1 = eval(ret_1)
		match_list_2 = eval(ret_2)

		data = []
		for match in match_list_1:
			item = {}

			# print(match)
			cup = match[8]
			match_time = match[3]

			home_team = match[11]
			away_team = match[14]

			if match[6] != '' and match[6].split('-')[0] != '取消':
				fs_A = match[6].split('-')[0]
				fs_B = match[6].split('-')[1]

				data.append([cup, match_time, home_team, away_team, fs_A, fs_B, team_name])


			df = pd.DataFrame(data, columns=['赛事', '时间', '主队', '客队', '主队进球', '客队进球', '国家队名'])

			# print(df)
		all_data.append(df)


		for match in match_list_2:

			cup = match[8]
			match_time = match[3]

			home_team = match[11]
			away_team = match[14]

			if match[6] != '' and match[6].split('-')[0] != '取消':
				fs_A = match[6].split('-')[0]
				fs_B = match[6].split('-')[1]

				data.append([cup, match_time, home_team, away_team, fs_A, fs_B, team_name])


			#列表转换为df

			df = pd.DataFrame(data, columns=['赛事', '时间', '主队', '客队', '主队进球', '客队进球', '国家队名'])

		all_data.append(df)

		print(all_data)
		i += 1
	output = pd.concat(all_data)
	output.reset_index(drop=True, inplace=True)
	print(output)
	output.to_csv('data_2018WorldCup.csv', index=False, encoding='utf-8')


def get_team_ids():
	"""Yield (team_id, team_name) for every team listed on the cup main page.

	Opens a dedicated Chrome driver and always quits it when the generator
	finishes or is closed (the original leaked the driver process).
	"""
	driver = webdriver.Chrome()
	try:
		driver.get('http://zq.win007.com/cn/CupMatch/75.html')
		teams = driver.find_elements_by_xpath(
			".//td[@style='background-color:#fff;text-align:left;']")
		for team in teams:
			link = team.find_element_by_xpath('.//a')
			# The numeric team id is the last path component of the href.
			team_id = int(link.get_attribute('href').split('/')[-1].split('.')[0])
			team_name = link.text
			print(team_id, team_name)
			yield team_id, team_name
	finally:
		driver.quit()

# Script entry point: scrape all teams' match data via the Ajax endpoint.
if __name__ == '__main__':
	# spider = Spider()
	# # Step 1: scrape the 2018 World Cup team ids. Step 2: loop over each
	# # team and scrape its match data.
	# spider.get_all_team_data()
	my_world()




