import re
import time
import codecs
from os.path import join
import sys
from walk import read_file
import multiprocessing
import pickle


def get_interface():
	"""Scan every collected source file for class declarations that mention
	one of the interface names listed in interface_collect.txt.

	Outputs:
	  .\\data\\level1.raw -- each matching source line
	  .\\data\\level1.ass -- ~20 lines of context around each match
	Prints simple line-count / timing stats when done.
	"""
	start = time.time()
	interface_names = common_file_list(r".\data\interface_collect.txt")
	p_r = []
	for name in interface_names:
		# regex example:interface IRecipientSession : IDirectorySession, IConfigDataProvider
		#p = '.*interface\s([a-zA-Z]*).*:.*' + item.strip()
		# regex example:internal abstract class ADSession : IDirectorySession
		p_r.append(re.compile(r'.*class\s([a-zA-Z]*)\s:.*' + name.strip()))

	f_l = get_file_list()

	total = 0
	# use context managers so the report files are closed even on error
	with open(r'.\data\level1.raw', 'w') as f_result, \
			open(r'.\data\level1.ass', 'w') as f_result_a:
		for item in f_l:
			count = 0
			f_cache = read_file(item)
			f_cache_len = len(f_cache)
			for line in f_cache:
				total += 1
				count += 1
				for pattern in p_r:
					if pattern.match(line):
						print(line, file=f_result)
						print("--------------------------------------------------\nfile_name: ", item, " line number: ", count, file=f_result_a)
						# BUGFIX: clamp the lower bound -- a negative index used
						# to wrap around and print lines from the END of the file
						# for matches within the first 10 lines.
						for index in range(max(count - 10, 0), count + 10):
							if index < f_cache_len:
								print(f_cache[index], file=f_result_a)
			f_result.flush()
			f_result_a.flush()

	end = time.time()

	print(total, "lines are processed")
	print(end - start, ' seconds consumed !')

def show_wait():
	"""Emit one dash per second forever -- a crude progress spinner meant to
	run in a separate process while the main analysis executes."""
	while True:
		sys.stdout.write("-")
		sys.stdout.flush()
		time.sleep(1.0)

def print_c(l):
	"""Print every element of *l* on its own line."""
	for element in l:
		print(element)

def get_method_list():
	# Load the method-name prefixes via walk.read_file.  NOTE(review): this
	# path has no ./data/ prefix, unlike the other *_collect.txt loaders --
	# confirm the file really lives in the working directory.
	return read_file("method_collect.txt")	

def get_class_list():
	# Load the class-name list via walk.read_file.  NOTE(review): presumably
	# returns one name per line like the other collect files -- confirm
	# against walk.read_file's contract.
	return read_file("./data/class_collect.txt")

def get_file_list():
	"""Return the file paths listed one per line in ./data/files_collect.txt,
	with surrounding whitespace stripped."""
	with open('./data/files_collect.txt', 'r') as f:
		return [line.strip() for line in f]

def get_regex_list(c):
	"""Compile one 'line contains this token' pattern per entry of *c*."""
	compiled = []
	for token in c:
		# this is the place where we commonly replace regex
		compiled.append(re.compile(".*" + token.strip() + ".*$"))
	return compiled

# read common files and strip the \r\n and bom characters
def common_file_list(f_name):
	"""Read *f_name* as UTF-8 (decode errors ignored) and return a list of
	lines with any BOM and surrounding whitespace stripped.

	Returns an empty list if the file cannot be opened or read.
	"""
	bom_utf8 = codecs.BOM_UTF8.decode('utf-8')
	try:
		with open(f_name, "rb") as f:
			raw = f.readlines()
	# BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
	# and hid genuine programming errors; only I/O failures are expected here.
	except OSError:
		return []
	return [line.decode('utf-8', 'ignore').lstrip(bom_utf8).strip() for line in raw]

# continue from new_stage and do more filtering
def new_stage_2():
	"""Second filtering pass over ./data/new_tmp.txt.

	Each input line is expected to carry a cast-style call such as
	    ((IConfigDataProvider)domainControllerSession).Save( ... ,d:\\path\\file
	Lines matching the cast-call pattern are written as (type, variable, path)
	to level1_result_before_six_new.txt (and echoed to stdout); everything
	else goes to before_six.txt for the next pass.
	"""
	f_list = common_file_list('./data/new_tmp.txt')

	#example: ((IConfigDataProvider)domainControllerSession).Save(
	p = '\(\((.*)\)(.*)\)\.[a-zA-Z]*\('
	# path match regex -- the trailing ",d:..." component of each record
	m = r'.*,(d:.*)'

	p_r = re.compile(p)
	m_r = re.compile(m)
	f_object_with_path = open('./data/level1_result_before_six.txt','w')
	f_object_with_path_1 = open('./data/level1_result_before_six_new.txt','w')
	f_new_tmp = open('./data/before_six.txt','w')
	for item in f_list:
		match1 = m_r.match(item)
		# BUGFIX: match1 used to be dereferenced unconditionally, raising
		# AttributeError on any record without a ",d:..." path suffix.
		path = match1.group(1) if match1 else ''

		match = p_r.match(item)
		if match:
			w = match.group(2)
			# keep only the member name of a dotted variable ("a.b" -> "b")
			if w.count('.') != 0:
				w = w.split('.')[1]
			print(match.group(1), w, sep=',',file = sys.stdout)
			print(match.group(1), w, path,sep=',',file = f_object_with_path_1)
		else:
			print(item,file=f_new_tmp)

	f_object_with_path.close()
	f_object_with_path_1.close()
	f_new_tmp.close()

# continue from new_stage_2 and get those methods called by the object get during new_stage_2 and mark the place
def new_stage_3():
	"""Join the (type, variable, path) records from iplist_rec with the
	tab-separated type->category table and run stage_3_process on each."""
	records = common_file_list('./data/iplist_rec')
	category_rows = common_file_list(r'.\data\class_category.txt')

	t_dict = {}
	for row in category_rows:
		columns = row.split('\t')
		t_dict[columns[0]] = columns[1]

	for record in records:
		fields = record.split(',')
		stage_3_process(t_dict, fields[0], fields[1], fields[2])

def stage_3_process(t_dict, val_t, val, path):
	"""Append every '<val>.Method(' call found in the file at *path* to
	forplay.txt as category,type,variable,method,path."""
	pattern = re.compile('.*' + val + r'\.([a-zA-Z<>]*)\(')
	source_lines = common_file_list(path)
	with open(r'.\data\forplay.txt', 'a') as out:
		for source_line in source_lines:
			hit = pattern.match(source_line)
			if hit:
				print(t_dict[val_t], val_t, val, hit.group(1), path, sep=',', file=out)

def test():
	"""Debug helper: print every 'session.Method(' call found in the
	level1_assistant.raw report."""
	pattern = re.compile(r'.*session\.([a-zA-Z<>]*)\(')
	for line in common_file_list(r'.\data\level1_assistant.raw'):
		hit = pattern.match(line)
		if hit:
			print(hit.group(), hit.group(1), sep='\n')

def new_stage():
	"""Level-1 scan: find every line in the collected files that mentions one
	of the collected class names.

	Outputs:
	  ./data/level1.raw -- matched line, line number, file path
	  ./data/level1.rec -- matched line, file path
	  ./data/level1.ass -- ~20 lines of context around each match
	"""
	start = time.time()

	# get list of specific classes
	c_list = get_class_list()
	p_r = get_regex_list(c_list)
	file_list = get_file_list()

	# get all lines with specific patterns
	f_result = open('./data/level1.raw','w')
	#print the above and below 10 lines 
	f_result_assistant = open('./data/level1.ass','w')
	# print specific information extracted from the lines
	f_level1_record = open('./data/level1.rec','w')

	total = 0
	for file_index in file_list:
		f_cache = read_file(file_index)
		f_cache_len = len(f_cache)
		count = 0
		for line in f_cache:
			total += 1
			count += 1
			for pattern in p_r:
				if pattern.match(line):
					#write result
					print(line.strip(), count, file_index, sep = ',', file=f_result)
					print(line.strip(), file_index, sep = ',', file=f_level1_record)
					#write result assistant
					print("========================================\r\nfile_name: ",file_index,"  line number:",count,file=f_result_assistant)
					# BUGFIX: clamp the lower bound -- a negative index used to
					# wrap around and print context from the END of the file.
					for index in range(max(count - 10, 0), count + 10):
						if index < f_cache_len:
							l = f_cache[index]
							# skip lines containing '/' (comment-ish lines)
							if l.count('/') == 0:
								print(l,file=f_result_assistant)
					break
		f_result.flush()
		f_result_assistant.flush()
		f_level1_record.flush()
	f_result.close()
	f_result_assistant.close()
	f_level1_record.close()
	end = time.time()
	print(total," lines are processed")
	print(end - start,' seconds consumed!')

def stage_1():
	"""First-stage scan: find '<obj>.<MethodPrefix...>(' call sites for every
	collected method-name prefix across all collected files.

	Outputs level1.raw (full match), level1.record (object, method, path) and
	level1_assistant.raw (context windows); prints a dash per file to stderr.
	"""
	start = time.time()
	# BUGFIX: this used to call `method_list()`, a name that does not exist
	# anywhere in the module (NameError at runtime); the loader is
	# get_method_list().
	m_list = get_method_list()
	p_r = []
	for prefix in m_list:
		m = ".*\s(.*)\.(" + prefix.strip() + "[a-zA-Z]*)\("
		p_r.append(re.compile(m))

	#read file_paths
	file_list = []
	with open('./data/files_collect.txt','r') as  f:
		for line in f:
			file_list.append(line.strip())

	f_result = open('./data/level1.raw','w')
	f_result_assistant = open('./data/level1_assistant.raw','w')
	f_level1_record = open('./data/level1.record','w')

	total = 0
	for file_index in file_list:
		sys.stdout.flush()
		# crude per-file progress indicator
		print("-",end ='',file=sys.stderr)
		f_cache = read_file(file_index)
		f_cache_len = len(f_cache)
		count = 0
		for line in f_cache:
			total += 1
			count += 1
			for pattern in p_r:
				p_match = pattern.match(line)
				if p_match:
					#write result
					print(p_match.group(), count, file_index, sep = ',', file=f_result)
					print(p_match.group(1), p_match.group(2), file_index, sep = ',', file=f_level1_record)
					#write result assistant
					print("========================================\r\nfile_name: ",file_index,"  line number:",count,file=f_result_assistant)
					# BUGFIX: clamp the lower bound -- a negative index used to
					# wrap around and print context from the END of the file.
					for index in range(max(count - 5, 0), count + 5):
						if index < f_cache_len:
							l = f_cache[index]
							if l.count('/') == 0:
								print(l,file=f_result_assistant)
	f_result.close()
	f_result_assistant.close()
	f_level1_record.close()
	end = time.time()
	print(total," lines are processed")
	print(end - start,' seconds consumed!')

def regex_test():
	"""Scratchpad of regex experiments (dev-only).

	Only the first experiment runs: everything after the first `return` is
	unreachable code kept around so individual experiments can be re-enabled
	by moving the returns during development.
	"""
	s3 = "internal static ExchangeTopology Discover(ITopologyConfigurationSession session, ExchangeTopologyScope scope)"
	s4 = "  ITopologyConfigurationSession  session, ExchangeTopologyScope scope)"
	s2 = "internal static ExchangeTopology Discover(ExchangeTopologyScope scope,ITopologyConfigurationSession session, ExchangeTopologyScope scope)"
	# extract the type name immediately preceding a parameter named "session"
	p2 = ".*[,(\s]([A-Z][a-zA-Z]{1,})\s*session"
	p2_regex = re.compile(p2)
	for item in [s2,s3,s4]:
		match = p2_regex.match(item)
		if match:
			print(match.group(1))
		else:
			print("failed")
	return

	# --- unreachable below this point (dev scratch) ---
	s = ' this.DeleteAllActualPhotoItems();'
	# NOTE(review): [a-zA-z] looks like a typo for [a-zA-Z]; the A-z range also
	# matches the characters [\]^_` -- confirm before reusing this pattern.
	p = ".*\s(.*)\.(Delete[a-zA-z]*)\("
	p_r = re.compile(p)
	match = p_r.match(s)
	if match:
		print(match.group(1),match.group(2))
	else:
		print("failed")
	

	return 
	s1 = 'sdfsdf9"sdfsdf d fsdf"' 
	s3 = '"sdfsdf d fsdf"' 
	s2 = "///sdfs"
	s4 = "      ///"
	# match any line containing a quote or a slash (string/comment detector)
	p = '.*["/].*$' 
	p_r = re.compile(p)
	for item in [s1,s2,s3,s4]:
		match = p_r.match(item) 
		if match:
			print("pass")
		else:
			print("failed")


	return 

	s = 'ADPagedReader<ADRecipient> results = globalCatalogSession.FindPaged'
	p = ".*\s(.*)\.FindPaged"
	p1 = "\s*([a-zA-Z]{1,})\s*globalCatalogSession"
	f_cache = read_file('./data/test.cs')
	p_regex = re.compile(p)
	word = list()
	for item in f_cache:
		match = p_regex.match(item)
		if match: 
			print(match.group(),match.group(1),sep=',')
			word.append(match.group(1))
	# second pass: find the declared type of the first captured variable
	p1 = '\s*([A-Z][a-zA-Z]{1,})\s*' + word[0]
	p1_regex = re.compile(p1)
	for item in f_cache:
		match = p1_regex.match(item)
		if match:
			print(match.group(1))
	

def test1():
	"""Tally data.dic records by method category and write the counts to
	statistics_result.dic; records matching no category go to for_play.txt."""
	records = common_file_list(r'.\data\data.dic')
	# check order matters: 'FindPaged' must be tested before its prefix 'Find'
	check_order = ['FindPaged', 'Find', 'Delete', 'Read', 'Save']
	m_dict = {key: 0 for key in ['Find', 'Delete', 'FindPaged', 'Read', 'Save']}
	for_play = open("for_play.txt", 'w')
	for record in records:
		val = record.split(',')[3]
		for key in check_order:
			if key in val:
				m_dict[key] += 1
				break
		else:
			print(record, file=for_play)
	for_play.close()

	f_result = open('statistics_result.dic', 'w')

	total = 0
	for key in m_dict.keys():
		print(key, m_dict[key], sep=',', file=f_result)
		total += m_dict[key]
	print(total)

	f_result.close()


def test2():
	"""Count data.dic records per (path component 5, path component 7) pair
	and dump key,sub_key,count rows to .\\data\\category.rec."""
	records = common_file_list(r'.\data\data.dic')
	r = {}
	for record in records:
		parts = record.split(',')[4].split('\\')
		bucket = r.setdefault(parts[5], {})
		bucket[parts[7]] = bucket.get(parts[7], 0) + 1

	with open(r'.\data\category.rec', 'w') as f_r:
		for key, sub in r.items():
			for sub_key, count in sub.items():
				print(key, sub_key, count, sep=',', file=f_r)
		
def test3():
	"""Split data.dic records into common/uncommon counts.

	Records outside the 'management' path component always count as common;
	under 'management', only helper/utils file names count as common.
	Writes both totals to .\\data\\common_and_uncommon.csv.
	"""
	records = common_file_list(r'.\data\data.dic')
	helper_re = re.compile(r'.*helper\.cs|.*utils.cs', re.IGNORECASE)
	comm_count = 0
	uncomm_count = 0
	for record in records:
		parts = record.split(',')[4].split('\\')
		if parts[5] == 'management':
			if helper_re.match(parts[-1]):
				comm_count += 1
			else:
				uncomm_count += 1
		else:
			comm_count += 1
	with open(r'.\data\common_and_uncommon.csv', 'w') as f_r:
		print('common_count', comm_count, sep=',', file=f_r)
		print('uncommon_count', uncomm_count, sep=',', file=f_r)

def test4():
	"""For every record in 380record.csv, collect all source lines mentioning
	the record's method and append one CSV row per record to tmp0625.csv."""
	f_list = common_file_list("380record.csv")
	# BUGFIX: the output file used to be re-opened (and never closed) inside
	# the loop via print(..., file=open(...)), leaking one handle per match;
	# open it once in append mode for the whole run.
	with open('tmp0625.csv', 'a') as out:
		for item in f_list:
			record = item.split(',')
			category = record[0]
			class_t = record[1]
			variable = record[2]
			method = record[3]
			f_path = record[4]
			p_r = re.compile(r'.*' + method + '.*')
			# concatenate every line of the target file that mentions the method
			codes = ''.join(line for line in common_file_list(f_path) if p_r.match(line))
			if codes != '':
				print(category, class_t, variable, method, codes, f_path, sep=',', file=out)

def test5():
	"""Group 380record.csv records by method name, gather code context for
	each occurrence, and append a summary row to result0626.csv plus a
	readable dump to source_codes0626.txt."""
	f_list = common_file_list("380record.csv")
	method_dict = {}
	for item in f_list:
		record = item.split(',')
		method_dict.setdefault(record[3], []).append(record)

	# BUGFIX: the output files used to be re-opened (and leaked) once per
	# print call; open each once for the whole run.
	with open('result0626.csv', 'a') as f_csv, open('source_codes0626.txt', 'a') as f_src:
		for method, value in method_dict.items():
			codes = ''
			category = value[0][0]
			class_t = value[0][1]
			variable = value[0][2]
			for record in value:
				# BUGFIX: every iteration used to pass the FIRST record's path
				# (value[0][4]), appending the same context len(value) times;
				# use each record's own path instead.
				codes = codes + inner_test5(method, record[4])
			print(category, class_t, variable, method, sep=',', file=f_csv)
			print("==================================================\r\n", file=f_src)
			print("category: " + category, "class type: " + class_t, "variable: " + variable, "method:" + method, codes, sep='\r\n', file=f_src)



def inner_test5(method, f_path):
	"""Return a text blob with up to 10 lines of context around every line
	of *f_path* that mentions *method*, prefixed by the file path."""
	pattern = re.compile(r'.*' + method + '.*')
	source_lines = test8(f_path)
	snippets = []
	#get all code lines that matches
	for i, candidate in enumerate(source_lines):
		if pattern.match(candidate):
			begin = max(i - 5, 0)
			end = min(i + 5, len(source_lines) - 1)
			snippets.append("matched line number is :%s \r\n" % i)
			snippets.extend(source_lines[begin:end])

	codes = '\r\n' + f_path + '\r\n'
	for snippet in snippets:
		codes = codes + snippet + '\r\n'

	return codes



def test7():
	"""Tiny %-formatting demo: print a line-number message."""
	line_no = 5
	print("The line number is %s \r\n" % line_no)
		

def test8(f_name):
	cache = list()
	bom_utf8 = codecs.BOM_UTF8.decode('utf-8')
	try:
		with open(f_name,"rb") as f:
			cache = f.readlines()
		for i in range(len(cache)): 
			cache[i] = cache[i].decode('utf-8','ignore').lstrip(bom_utf8)
	except:
		return list()
	return cache



if __name__ == "__main__":
	# Run the spinner in a separate process so dashes keep printing while the
	# (blocking, single-threaded) analysis stage below executes.
	p = multiprocessing.Process(target=show_wait)
	p.start()
	#new_stage()
	#new_stage_2()
	#new_stage_3()
	#test()
	#get_interface()
	#test3()
	# Only the currently-active pipeline stage is uncommented; the lines above
	# are kept so the pipeline can be re-run from any earlier stage by hand.
	test5()
	# Stop the spinner once the stage finishes.
	p.terminate()

	#stage_1()
