from Registry import Registry
from Common import Common
from lxml import etree
from io import StringIO
import json, os, re, urllib, hashlib, copy, pymongo

class SpiderCommon:
	"""Shared spider helpers: link extraction, normalisation and MongoDB storage.

	The first group of methods is stateless and is called on the class itself
	(``SpiderCommon.method(...)``); the second group operates on an instance.
	"""

	# Template of a URL document in the spider_urls collection; used to strip
	# foreign keys from link dicts before they are written back to Mongo.
	_link_object = {'hash': '', 'path': '', 'query': '', 'time': 0, 'code': 0,
					'checked': 0, 'referer': '', 'founder': '', 'size': 0, 'getted': 0}
	ignore_regexp = None
	_external_hosts = []
	# Matches "<prefix>/<segment>/../" so clear_link() can collapse parent
	# references; compiled once here instead of on every call.
	_back_regex = re.compile(r"(.*|)/(.*)/\.\./")

	@staticmethod
	def make_full_new_scan():
		"""Mark every known URL as unchecked so the next run re-scans all of them."""
		# BUGFIX: pymongo's flag for multi-document updates is `multi`;
		# the old `multiple=True` kwarg meant only one document was reset.
		Registry.get('mongo').spider_urls.update({}, {'$set': {'checked': 0}}, upsert=False, multi=True)

	@staticmethod
	def _clear_link_obj(link):
		"""Remove (in place) every key that is not part of the canonical
		link document; returns the same dict for chaining."""
		for key in list(link.keys()):
			if key not in SpiderCommon._link_object:
				del link[key]
		return link

	@staticmethod
	def links_checked(links, map):
		"""Flag the given link documents as checked in spider_urls.

		`map` is kept for interface compatibility; it is not used here.
		"""
		for link in links:
			link = SpiderCommon._clear_link_obj(link)
			link['checked'] = 1
			Registry.get('mongo').spider_urls.update({'hash': link['hash']}, {'$set': link})

	@staticmethod
	def gen_url(link, host):
		"""Build an absolute http:// URL from a link document and a host name."""
		url = 'http://' + host + link['path']
		if link['query']:
			url += '?' + link['query']
		return url

	@staticmethod
	def parse_links(content, ignore_regexp):
		"""Extract raw link targets (href/src values) from HTML content.

		Results may contain None for tags missing the attribute; the
		ignore/only-one filters are driven by Registry entries, not by the
		`ignore_regexp` parameter (kept for interface compatibility).
		"""
		links = []
		# TODO: also collect form actions — but mark them checked right away.
		tags = {'a': 'href', 'script': 'src', 'img': 'src', 'iframe': 'src', 'frame': 'src', 'link': 'href'}
		# Parse the document once instead of re-parsing it for every tag name.
		tree = etree.parse(StringIO(content), etree.HTMLParser())
		for tag, attr in tags.items():
			for node in tree.xpath('//' + tag):
				links.append(node.get(attr))

		if Registry.isset('ignore_regexp'):
			links = SpiderCommon._clear_ignore(links)

		if Registry.isset('only_one'):
			links = SpiderCommon._clear_only_one(links)

		return links

	@staticmethod
	def _clear_ignore(links):
		"""Drop links matching the configured ignore_regexp (None entries survive)."""
		ignore = Registry.get('ignore_regexp')
		return [link for link in links if not (link and ignore.search(link))]

	@staticmethod
	def _clear_only_one(links):
		"""Apply 'only one' rules: the first link matching a rule is kept,
		later matches of the same rule are dropped. Rule state persists via
		the Registry between calls."""
		only_one = Registry.get('only_one')
		kept = []
		for link in links:
			blocked = False
			for rule in only_one:
				# Guard against None entries (lxml's .get() can return None).
				if link and re.search(rule['regex'], link):
					if rule['block']:
						blocked = True
					else:
						rule['block'] = True
			if not blocked:
				kept.append(link)
		Registry.set('only_one', only_one)
		return kept

	@staticmethod
	def prepare_links_for_insert(links, url, site):
		"""Normalise raw links against the page URL `url` for host `site`.

		External hosts are collected in _external_hosts and skipped. For each
		kept link every parent directory is also emitted (path '/a/b/c' adds
		'/a/b/' and '/a/' and '/') so directories get crawled too.
		"""
		links_to_insert = []
		for raw in links:
			parsed = urllib.parse.urlparse(raw)
			# Skip completely empty links (e.g. href="" or bare fragments).
			if not parsed.scheme and \
				not parsed.netloc and \
				not parsed.path and \
				not parsed.query:
				continue

			# BUGFIX: the third comparison was `'www' + site` (missing dot),
			# so links to www.<site> were wrongly treated as external.
			if parsed.netloc \
				and parsed.netloc != site \
				and 'www.' + parsed.netloc != site \
				and parsed.netloc != 'www.' + site:
				SpiderCommon._external_hosts.append(parsed.netloc)
				continue

			parsed = SpiderCommon.clear_link(parsed)
			parsed = SpiderCommon.build_path(parsed, url.path)
			parsed = SpiderCommon.clear_link(parsed)

			links_to_insert.append(parsed)

		separated_links = []
		for link in links_to_insert:
			paths = link.path.split("/")
			while len(paths) != 1:
				del paths[-1]
				separated_links.append(urllib.parse.ParseResult(
					scheme='', netloc='', path="/".join(paths) + '/',
					params='', query='', fragment=''))
		return links_to_insert + separated_links

	@staticmethod
	def insert_links(links, referer, site):
		"""Normalise `links` found on `referer` and insert them into spider_urls.

		Links whose scheme is listed in config spider.denied_schemes are
		skipped; duplicates are silently ignored via the unique hash index.
		"""
		links = SpiderCommon.prepare_links_for_insert(links, urllib.parse.urlparse(referer), site)
		if not links:
			return

		denied_schemes = Registry.get('config')['spider']['denied_schemes'].split(',')
		denied_schemes = [scheme.strip() for scheme in denied_schemes]

		for link in links:
			# BUGFIX: links are ParseResult named tuples, so the old test
			# `'scheme' in link and link['scheme'] in denied_schemes` never
			# worked (`in` checks tuple *values*, string indexing raises).
			if link.scheme in denied_schemes:
				continue

			path = link.path.strip()
			query = link.query.strip()
			insert = {'hash': hashlib.md5((path + query).encode()).hexdigest(),
					  'path': path,
					  'query': query,
					  'referer': referer,
					  'founder': 'spider',
					  'checked': 0 if SpiderCommon._link_allowed(link) else 1,
					  'getted': 0,
					  'code': 0,
					  'time': 0,
					  'size': 0}
			try:
				Registry.get('mongo').spider_urls.insert(insert)
			except pymongo.errors.DuplicateKeyError:
				pass  # URL already known
			except Exception as e:
				print("Can`t insert link " + insert['path'] + " " + insert['query'] + ") in db. May be it have non-utf8 symbols or somethink else. Exception message:")
				# BUGFIX: `type(e) + "\n"` raised TypeError (type + str).
				print(str(type(e)) + "\n")

	@staticmethod
	def _link_allowed(link):
		"""Return True unless the path ends in a file-like extension that
		fails the configured allow_regexp (only the last 5 characters are
		inspected for a dot)."""
		return Registry.get('allow_regexp').search(link.path) if link.path[len(link.path) - 5:].count('.') else True

	@staticmethod
	def build_path(link, url_path):
		"""Resolve a relative link path against the directory part of url_path."""
		if link.path.startswith('/'):
			return link  # already absolute

		path = SpiderCommon.del_file_from_path(url_path) + "/" + link.path

		return urllib.parse.ParseResult(scheme=link.scheme, netloc=link.netloc, path=path,
			params=link.params, query=link.query, fragment=link.fragment)

	@staticmethod
	def del_file_from_path(path):
		"""Return the directory part of `path` (everything before the last '/').

		Returns "" for paths without a slash and "/" for root-level files.
		"""
		if path.find("/") == -1:
			return ""

		parts = path.split("/")

		# BUGFIX: the old check `if parts[-1].find("."):` mis-used find()'s
		# return value (-1 is truthy, 0 falsy), so it dropped every last
		# segment *except* ones starting with a dot. Per RFC 3986 merge
		# semantics the last segment is always removed.
		del parts[-1]

		if len(parts) == 1 and not parts[0]:
			return "/"

		return "/".join(parts)

	@staticmethod
	def clear_link(link):
		"""Normalise a parsed link: unify slashes, collapse '//' and '/./',
		resolve 'segment/../' pairs and decode '&amp;' in the query."""
		path = link.path
		while path and (path.find("\\") > -1 or path.find("//") > -1 or path.find("/./") > -1):
			path = path.replace("\\", "/")
			path = path.replace("//", "/")
			path = path.replace("/./", "/")

		query = link.query.replace('&amp;', '&') if link.query else ""

		# Collapse "segment/../" pairs until none remain.
		reg_res = SpiderCommon._back_regex.findall(path)
		while reg_res and len(reg_res[0]) == 2:
			path = path.replace(reg_res[0][1] + "/../", "")
			reg_res = SpiderCommon._back_regex.findall(path)

		return urllib.parse.ParseResult(scheme=link.scheme, netloc=link.netloc, path=path,
			params=link.params, query=query, fragment=link.fragment)

# ==============================================================
	# Template of a header document (documentation only; not read by code here).
	_header_object = {'url': '', 'header': '', 'value': ''}
	_pages = []
	_sitemap_name = ''

	def make_urls_data_json(self):
		"""Serialise the spider_urls collection as JSON lines."""
		return self._make_data_json_from_collection("spider_urls")

	def make_headers_data_json(self):
		"""Serialise the spider_headers collection as JSON lines."""
		return self._make_data_json_from_collection("spider_headers")

	def load_headers_data_json(self, file):
		"""Load a headers dump into spider_headers; returns the document count."""
		return self._load_json_data_to_collection(file, 'headers')

	def load_urls_data_json(self, file):
		"""Load a URLs dump into spider_urls; returns the document count."""
		return self._load_json_data_to_collection(file, 'urls')

	def get_link_data_by_hash(self, hash):
		"""Fetch one link document from spider_urls by its hash."""
		return Registry.get('mongo').spider_urls.find_one({'hash': hash})

	def prepare_external_hosts(self):
		"""Strip whitespace and deduplicate the collected external host names."""
		self._external_hosts = list(set(host.strip() for host in self._external_hosts))

	def get_external_hosts(self):
		"""Return the unique external hosts seen so far."""
		self.prepare_external_hosts()
		return self._external_hosts

	# TODO: maybe make this method a variant of the one above?
	def get_external_hosts_count(self):
		"""Return how many unique external hosts were seen."""
		self.prepare_external_hosts()
		return len(self._external_hosts)

	def make_allowed_links(self):
		"""Compile config spider.allow_links into a regex stored in the Registry."""
		allowed_links = Registry.get('config')['spider']['allow_links'].split(',')
		Registry.set('allowed_links', self.build_expr(allowed_links))
		# TODO: make this method return its result. Where is it needed?

	# TODO: kill methods used only once, like build_expr.
	def build_expr(self, exts):
		"""Compile a case-insensitive regex matching any of the given file
		extensions at the end of a string (e.g. ['php', 'html'])."""
		return re.compile('|'.join('\\.' + ext.strip() + '$' for ext in exts), re.I)

	def clear_old_data(self, map):
		"""Drop spider collections and delete the saved page files for `map`."""
		self.clear_tables()
		self.clear_pages_data(map)

	def clear_tables(self):
		"""Drop both spider collections."""
		Registry.get('mongo').spider_urls.drop()
		Registry.get('mongo').spider_headers.drop()

	def _get_pages_list(self, map):
		"""List (and cache) saved page files for `map` — file names are md5 hashes."""
		expr = re.compile('^[a-z0-9]{32}$')
		if not len(self._pages):
			for page in os.listdir(Registry.get('data_path') + map):
				if expr.match(page):
					self._pages.append(page)

		return self._pages

	def _make_data_json_from_collection(self, coll):
		"""Serialise every document of collection `coll` as one JSON object
		per line (without the Mongo _id)."""
		# Iterate the cursor directly instead of issuing one skip/limit query
		# per document (the old approach was quadratic server-side) and join
		# the chunks instead of quadratic string concatenation.
		rows = []
		for row in Registry.get('mongo')[coll].find():
			del row['_id']
			rows.append(json.dumps(row) + "\n")
		return ''.join(rows)

	def _load_json_data_to_collection(self, file, type):
		"""Load a JSON-lines dump into the headers or urls collection.

		Drops the target collection first, skips unparsable/duplicate lines,
		recreates the indexes and returns the resulting document count.
		Raises SpiderException when the dump file does not exist.
		"""
		suffix = ".headers.json" if type == 'headers' else ".spider.json"
		file = Registry.get('results_path') + file + suffix
		if not os.path.exists(file):
			raise SpiderException("Load-map " + file + " not exists!")

		coll = Registry.get('mongo').spider_headers if type == 'headers' else Registry.get('mongo').spider_urls
		coll.drop()

		# BUGFIX: the file handle was never closed before.
		with open(file, 'r') as f:
			for line in f:
				try:
					coll.insert(json.loads(line), safe=True)
				# except MongoCursorException: #TODO which exception belongs here?
				except Exception:
					pass  # broken lines are skipped on purpose
		# TODO: debug mode with a shared work log.
		if type == 'headers':
			coll.create_index([('header', 1)])
		else:
			coll.create_index([('hash', 1)], unique=True, dropDups=True)

		return coll.count()

	def headers(self, url):
		"""Parse the response headers from a raw page record and store them."""
		# BUGFIX: was `self._put_haders_in_db` (typo) and passed url['path']
		# to _parse_headers, which only accepts the raw content.
		headers = self._parse_headers(url['content'])
		self._put_headers_in_db(headers, url)

	def write_headers(self):
		"""Dump every stored header document to the headers results file."""
		# BUGFIX: mongo_result_to_list was called unqualified (NameError);
		# iterate the cursor directly instead of one skip/limit query per doc.
		chunks = []
		for header in Registry.get('mongo').spider_headers.find():
			del header['_id']
			chunks.append(json.dumps(header))
		# NOTE(review): no separator between documents — kept as originally written.
		self._write_data("headers", ''.join(chunks), "headers")

	def _write_data(self, name, data, type):
		"""Append `data` to the results file for `name`; `type` must be one
		of the known result kinds. Raises SpiderException otherwise."""
		if type not in ['parsers', 'checkers', 'headers']:
			raise SpiderException("Unknown type of object")
		# BUGFIX: the old code had `f.close` without calling it, leaking the handle.
		# TODO: replace _sitemap_name with the current map name.
		with open(Registry.get('results_path') + self._sitemap_name + name + '.txt', 'a') as f:
			f.write(data)

	def _put_headers_in_db(self, headers, url):
		"""Store parsed headers, grouping occurrences per header name as
		{header_name: [{'url': ..., 'value': ...}, ...]} documents."""
		coll = Registry.get('mongo').spider_headers
		create_index = not coll.count()

		for header in headers:
			header_array = coll.find_one({header: {"$exists": True}})

			if header_array:
				header_array[header].append({'url': url, 'value': headers[header]})
				coll.save(header_array)
			else:
				header_array = {header: []}
				header_array[header].append({'url': url, 'value': headers[header]})
				coll.insert(header_array)

		if create_index:
			coll.create_index([('header', 1)])

	# TODO: is this needed? Requests should provide equivalent functionality.
	def _parse_headers(self, content):
		"""Parse a raw HTTP response into a {header-name: value} dict.

		Uses everything before the first blank line, drops the status line and
		splits each remaining line on the first ':'. Lines without a colon map
		to an empty value instead of crashing.
		"""
		result = dict()
		lines = content.split("\r\n\r\n")[0].split("\r\n")
		del lines[0]  # status line
		for header in lines:
			name, _, value = header.partition(":")
			result[name.strip()] = value.strip()
		return result