try:
	from bs4 import BeautifulSoup
	USE_BS = True
except:
	USE_BS = False
import urllib2, re, os
import utils
import overmind

# Re-import utils so in-place edits to it are picked up without restarting
# the interpreter (Python 2 builtin reload; handy during interactive dev).
reload(utils)

#
#PARSER
#
def get_raw_page(url, local = False):
	"""Return the raw contents of a page as a string.

	url -- URL to fetch, or a filesystem path when local=True.
	local -- when True, read `url` from disk instead of the network.

	Raises urllib2.URLError (incl. socket timeouts after 15s) on
	network failure and IOError when the local file is missing.
	"""
	if not local:
		req = urllib2.Request(url)
		response = urllib2.urlopen(req, None, 15)
		# Close the response explicitly so repeated fetches do not
		# leak sockets (the original never closed it).
		try:
			return response.read()
		finally:
			response.close()
	else:
		# Context manager guarantees the handle is released even if
		# read() raises.
		with open(url) as f:
			return f.read()

def get_match_divs(soup, match_type = "Ranked Solo 5v5"):
	"""Collect the outermost container div for every match of `match_type`.

	Starting from each div whose text equals `match_type`, walk up the
	tree (at most 10 levels) until an ancestor's first CSS class
	contains "match_win" or "match_loss" — lolking marks each match row
	this way.  Returns the list of those ancestors, or None when no
	match label exists on the page at all.
	"""
	found = []
	ranked_divs = soup.find_all("div", text=match_type)
	if not ranked_divs:
		return None
	for div in ranked_divs:
		cur = div.parent
		for i in range(10):
			try:
				cls = cur.parent.attrs["class"][0]
				if cls.find("match_win") > -1 or cls.find("match_loss") > -1:
					found.append(cur.parent)
					break
				cur = cur.parent
			except (AttributeError, KeyError, IndexError):
				# Climbed past the document root or hit a tag with no
				# class attribute; give up on this div.  (The original
				# bare `except: pass` just re-raised the same error for
				# the remaining iterations.)
				break
	return found

def get_date_from_match_div(match_div):
	"""Return the match date string from a match div.

	Looks for the "Ranked Solo 5v5" label first, then "Normal 5v5";
	returns "000" when neither is present.  The date is stored in the
	data-hoverswitch attribute of the first span next to the label.
	"""
	label = match_div.find("div", text = "Ranked Solo 5v5")
	if not label:
		label = match_div.find("div", text = "Normal 5v5")
	if not label:
		return "000"
	first_span = list(label.parent.find_all("span"))[0]
	return str(first_span.attrs["data-hoverswitch"])

def get_outcome_from_match_div(match_div):
	"""Return "win" when the match div contains a "Win" label, else "loss"."""
	win_labels = match_div.find_all("div", text="Win")
	return "win" if len(list(win_labels)) > 0 else "loss"

def get_kda_from_match_div(match_div):
	"""Return the match's K/D/A as a list of ints, read from the
	<strong> tags that sit next to the "Kills" span."""
	kills_span = list(match_div.find_all("span", text="Kills"))[0]
	return [int(strong.text) for strong in kills_span.parent.find_all("strong")]

def get_cs_from_match_div(match_div):
	"""Return the creep score (minion kills) of a match div as an int."""
	minions_div = list(match_div.find_all("div", text="Minions"))[0]
	cs_strong = list(minions_div.parent.find_all("strong"))[0]
	return int(cs_strong.text)

def clean_champ_name(name):
	"""Normalize a champion name into a lookup key: lower-cased,
	trimmed, with spaces, apostrophes and periods removed."""
	cleaned = name.lower().strip()
	for ch in (" ", "'", "."):
		cleaned = cleaned.replace(ch, "")
	return cleaned

def get_champ_from_match_div(match_div):
	"""Return the cleaned champion name for a match div, taken from the
	last path segment of the link inside the 64px summoner icon."""
	icon_div = match_div.find("div", class_="summoner_icon_64")
	href = icon_div.a.attrs["href"]
	return clean_champ_name(str(href.rpartition("/")[2]))

def get_team_from_match_div(match_div, current_id, current_summoner_name):
	"""Extract both team rosters from a match div.

	Returns {"allies": [(champ, id), ...], "enemies": [(champ, id), ...]}
	where champ is the champion URL slug and id is the summoner id
	string.  The current player has no profile link on their own page,
	so current_id is substituted for them.
	"""
	def _get_summoner_ids(tag):
		# Collect (champion, summoner_id) pairs from one team's table.
		team = []
		for a in tag.find_all("a"):
			if a.attrs["href"].find("champion") > -1:
				champ = str(a.attrs["href"].rpartition("/")[2])
				tr = a.parent.parent.parent
				# NOTE(review): this inner loop keeps only the id from
				# the LAST link in the row, and if the row had no links
				# at all `id` would be unbound (NameError).  Presumably
				# each row carries exactly one summoner/profile link —
				# verify against the live markup.
				for suba in tr.find_all("a"):
					if suba.attrs["href"].find("summoner") > -1:
						id = str(suba.attrs["href"]).rpartition("/")[2]
					else:
						id = current_id
				team.append((champ, id))
		return team

	# Find the <td> holding the current player's name; its table is the
	# allies table.  NOTE(review): if no cell matches, `td` falls through
	# as the last cell visited and the wrong table is used — this assumes
	# the name always appears in the div.
	for td in match_div.find_all("td"):
		if td.string == current_summoner_name:
			break
	allies_table = td.parent.parent
	teams_tr = allies_table.parent.parent.parent
	# The row holds two tables: [0] allies, [1] enemies.
	enemy_table = list(teams_tr.find_all("table"))[1]
	teams = {}
	teams["allies"] = _get_summoner_ids(allies_table)
	teams["enemies"] = _get_summoner_ids(enemy_table)
	return teams

def get_stats_dict_from_user_page(soup):
	"""Scrape every per-champion ranked-stats table on a summoner page.

	Returns {table_kind: [champ_stats_dict, ...]} where table_kind is
	the table's second CSS class with the "_ranked_stats" suffix
	stripped, and each champ_stats_dict comes from parse_champ_stats.
	"""
	all_stats = {}
	for table in soup.find_all("table", id="ranked_stats"):
		table_name = str(table["class"][1])
		headers = []
		for th in table.thead.find_all("th"):
			headers.append(str(th.string))
		raw_champs = []
		for tr in table.find_all("tr"):
			values = []
			for i, td in enumerate(tr.find_all("td")):
				if i == 0:
					# First column: champion cell — name is in the
					# second <a> of the cell.
					champ_name = clean_champ_name(str(list(td.find_all("a"))[1].string))
					values.append(champ_name)
				else:
					values.append(str(td.string))
			# Header rows yield no <td>s; skip them.
			if values:
				raw_champs.append(dict(zip(headers, values)))
		# Parse once, after all rows are collected.  The original
		# rebuilt parsed_champs from scratch on every row (quadratic)
		# and crashed with NameError on a table with no rows.
		parsed_champs = [parse_champ_stats(rc) for rc in raw_champs]
		all_stats[table_name.replace("_ranked_stats", "")] = parsed_champs
	return all_stats

def parse_champ_stats(c):
	role = overmind.CHAMP_ROLE.get(c["Champion"])
	if role == None:
		print "could not fine %s in CHAMP_ROLL"%(c["Champion"])
	champ = {}
	champ["name"] = c["Champion"]
	if role == "m-dps" or role == "r-dps":
		role = "dps"
	champ["role"] = role
	champ["cs"] = float(c["Creep Score"].partition("/")[0])
	champ["kills"] = float(c["Kills"].partition("/")[0])
	champ["deaths"] = float(c["Deaths"].partition("/")[0])
	champ["assists"] = float(c["Assists"].partition("/")[0])
	champ["played"] = float(c["Wins"]) + float(c["Losses"])
	champ["kill_ratio"] = champ["kills"]/(champ["deaths"]+1)
	champ["assists_ratio"] = champ["assists"]/(champ["deaths"]+1)
	return champ

def get_match_history_dict(soup, id, match_type="Ranked Solo 5v5", team = True):
	"""Build the match history list for one summoner page.

	Each entry is a dict with date, kda, cs, outcome and champion; when
	team is True it also carries both team rosters.  Returns None when
	the page has no matches of `match_type`.
	"""
	match_divs = get_match_divs(soup, match_type=match_type)
	if not match_divs:
		return None
	matches = []
	for div in match_divs:
		entry = {
			"date": get_date_from_match_div(div),
			"kda": get_kda_from_match_div(div),
			"cs": get_cs_from_match_div(div),
			"outcome": get_outcome_from_match_div(div),
			"champion": get_champ_from_match_div(div),
		}
		if team:
			entry["team"] = get_team_from_match_div(div, id, get_meta_from_summoner_page(soup))
		matches.append(entry)
	return matches

def get_meta_from_summoner_page(soup):
	"""Return the summoner's display name (unicode) from the page's
	title bar (second div inside div.summoner_titlebar)."""
	title_bar = soup.find("div", class_="summoner_titlebar")
	name_div = list(title_bar.find_all("div"))[1]
	return unicode(name_div.find("div").text)

def get_summoner_soup_page(id, cache = True):
	"""Fetch the lolking page for summoner `id` and return it parsed as
	a BeautifulSoup tree.

	Pages are cached on disk under overmind.CACHE_DIR; pass cache=False
	to force a refetch.  Returns None when BeautifulSoup is unavailable.
	"""
	if not USE_BS:
		return None
	cache_file = os.path.join(overmind.CACHE_DIR, id)
	if os.path.isfile(cache_file) and cache:
		raw = get_raw_page(cache_file, local=True)
	else:
		url = "http://www.lolking.net/summoner/na/%s"%(id)
		raw = get_raw_page(url, local=False)
		# `with` guarantees the cache file is flushed and closed (the
		# original leaked a handle named `file`, shadowing the builtin).
		with open(cache_file, "w") as f:
			f.write(raw)
	soup = BeautifulSoup(raw)
	return soup

def get_summoner_data(id, data_cache = True, soup_cache = True):
	cache_file = os.path.join(overmind.SUMMONERS_DIR, id)
	if os.path.isfile(cache_file) and data_cache:
		data = utils.unpickle_data(cache_file)
	elif USE_BS:
		soup = get_summoner_soup_page(id, soup_cache)
		data={}
		try:
			data["meta"] = get_meta_from_summoner_page(soup)
		except:
			print "could not get name for %s"%(id)
			data["meta"] = "unknown"
		data["stats"] = get_stats_dict_from_user_page(soup)
		data["ranked_mh"] = get_match_history_dict(soup,id, match_type="Ranked Solo 5v5", team = False)
		data["normal_mh"] = get_match_history_dict(soup,id, match_type="Normal 5v5", team = False)
		data["custom"] = get_match_history_dict(soup, id, match_type = "Custom", team = False)
		utils.pickle_data(data, cache_file)
	else:
		data = None
	return data

def get_id_from_name(name, test = True):
	if test:
		return overmind.MY_ID
	try:
		if name == overmind.MY_LOL_USER_NAME:
			return  overmind.MY_ID
		url = "http://www.lolking.net/search?name=%s"%name
		print url
		raw = get_raw_page(url, local=False)
		soup = BeautifulSoup(raw)
		id = None
		for div in soup.find_all("div", class_="search_result_item"):
			for a in div.find_all("a"): 
				if a.attrs["href"].find("/summoner/na/")>-1:
					id = a.attrs["href"].replace("/summoner/na/", "")
					break
		return id
	except:
		return None
