#!/usr/bin/env python
# -*- coding: utf-8 -*-

my_id = '$Id: csv_fuzz_uniq.py 8 2013-01-06 16:09:36Z joymarquis@gmail.com $'

import sys
import csv
import difflib

try:
	import argparse
except ImportError:
	sys.path.append(sys.path[0] + "/lib/argparse-1.2.1")
	import argparse


def print_row_utf8(row, prefix=None):
	if prefix:	print prefix,
	for col in row: print col.decode('utf-8') + '\t',
	print

def str_remove_ignored_word(str_u, ignore_word_list_u):
	"""Return str_u with every substring in ignore_word_list_u stripped out.

	Words are removed in list order; the input string itself is not mutated.
	"""
	cleaned = str_u
	for word in ignore_word_list_u:
		cleaned = cleaned.replace(word, '')
	return cleaned


def main():
	"""Scan a key-sorted csv file and write a copy whose first column marks
	each row as unique, duplicate or invalid.

	Consecutive rows sharing the same key column (--uniq_col) form a group;
	within a group the --fuzz_col values are compared with difflib and close
	matches are marked as duplicates.  Rows too short to carry data are
	marked invalid.  Summary statistics are printed at the end.
	"""
	# Pre-parse pass: count '-h' occurrences before building the real parser,
	# so repeated -h flags (-hh / -hhh) unlock progressively hidden options.
	p = argparse.ArgumentParser(add_help=False)
	p.add_argument('-h', action='count', default=0, dest='p_d')
	(opts, args) = p.parse_known_args()

	epilog_str = '\n\n\t' + my_id
	is_test = opts.p_d > 1   # -hh  : reveal tuning options
	is_hack = opts.p_d > 2   # -hhh : reveal experimental options

	p = argparse.ArgumentParser(description='Find duplicated rows from a given csv file, then add a new column to indicate the stat',
			epilog=epilog_str, formatter_class=argparse.ArgumentDefaultsHelpFormatter)

	p.add_argument('--if', metavar='IN_FILE', default='in.csv', required=True,
			dest='p_if', help='input csv file')
	p.add_argument('--of', metavar='OUT_FILE', default='out.csv',
			dest='p_of', help='output csv file')
	p.add_argument('--fuzz_level', metavar='LEVEL', type=float, default=0.6,
			dest='p_fuzz_level', help='larger value for strict match, smaller value for loose match. range: 0.0-1.0')
	p.add_argument('--if_delim', metavar='DELIM', default=';',
			dest='p_if_delim', help='delimeter for parsing input file')
	p.add_argument('--of_delim', metavar='DELIM', default=';',
			dest='p_of_delim', help='delimeter for generating output file')
	p.add_argument('--fuzz_ignore', metavar='STR', default=[], nargs='+',
			dest='p_fuzz_ignore', help='words to be ignored during fuzz match')

	# BUG FIX: added type=int to the column-number options.  Without it a
	# value given on the command line stays a string, and indexing a row
	# with a string raises TypeError.
	p.add_argument('--uniq_col', metavar='COL_NUM', type=int, default=0,
			dest='p_uniq_col', help='column which is unique, like "User Name"' if is_test else argparse.SUPPRESS)
	p.add_argument('--fuzz_col', metavar='COL_NUM', type=int, default=1,
			dest='p_fuzz_col', help='column for fuzzy match, like "Address"' if is_test else argparse.SUPPRESS)
	p.add_argument('--uniq_mark', metavar='STR', default='+',
			dest='p_uniq_mark', help='string used to mark the uniq row' if is_test else argparse.SUPPRESS)
	p.add_argument('--dupl_mark', metavar='STR', default='-',
			dest='p_dupl_mark', help='string used to mark the duplicate row' if is_test else argparse.SUPPRESS)
	p.add_argument('--invl_mark', metavar='STR', default='x',
			dest='p_invl_mark', help='string used to mark the invalid row' if is_hack else argparse.SUPPRESS)

	p.add_argument('--fuzz_replace', metavar='STR1,STR2[,...]]', default=[], nargs='+',
			dest='p_fuzz_replace', help='NOT_IMPLEMENTED: words STRn in same set are treated as same word during fuzz match' if is_hack else argparse.SUPPRESS)

	p.add_argument('--ouf', metavar='OUT_FILE', default='out_uniq.csv',
			dest='p_ouf', help='output file containing unique rows only, no marker' if is_hack else argparse.SUPPRESS)
	# BUG FIX: dest was 'p_ouf', which silently clobbered the --ouf option;
	# --odf now stores into its own dest 'p_odf'.
	p.add_argument('--odf', metavar='OUT_FILE', default='out_dup.csv',
			dest='p_odf', help='output file containing duplicated rows only, no marker' if is_hack else argparse.SUPPRESS)

	p.add_argument('-v', action='count', default=0,
			dest='p_v', help='verbose output')
	p.add_argument('--version', action='version', version='%(prog)s r'+'$Revision: 8 $'.replace('$Revision: ', '').replace('$', ''))

	opts = p.parse_args()

	fi_name = opts.p_if
	fo_name = opts.p_of
	fi_delim = opts.p_if_delim
	fo_delim = opts.p_of_delim
	match_cutoff = opts.p_fuzz_level
	v_level = opts.p_v

	col_uniq_n = opts.p_uniq_col
	col_fuzz_n = opts.p_fuzz_col
	col_uniq_m = opts.p_uniq_mark
	col_dupl_m = opts.p_dupl_mark
	col_invl_m = opts.p_invl_mark
	fuzz_ignore_word_list = opts.p_fuzz_ignore

	# check parameter
	if match_cutoff < 0.0 or match_cutoff > 1.0:
		print 'Error: fuzz_level', match_cutoff, 'out of range'
		# sys.exit instead of the site-provided exit(), which may be absent
		sys.exit(1)

	print '=' * 80
	print '          input file :', fi_name
	print '          fuzz level :', match_cutoff
	print '           uniq mark :', col_uniq_m
	print '           dupl mark :', col_dupl_m
	print '           invl mark :', col_invl_m
	print '-' * 80

	_stat_row_total = 0
	_stat_row_uniq = 0
	_stat_row_dupl = 0
	_stat_row_invl = 0
	_stat_dupl_keywords = 0

	with open(fi_name, 'rb') as fi:
		with open(fo_name, 'wb') as fo:
			addrs = csv.reader(fi, delimiter=fi_delim, skipinitialspace=True)
			addrs_new = csv.writer(fo, delimiter=fo_delim)
			row_curr = next(addrs, None)
			while True:
				if row_curr is None:	break
				if not row_curr:
					# BUG FIX: a blank line yields [] and used to terminate
					# processing mid-file; skip it instead.
					row_curr = next(addrs, None)
					continue
				row_next = next(addrs, None)
				is_same_entry = ((cmp(row_curr[col_uniq_n], row_next[col_uniq_n]) == 0) if row_next else False)
				if not is_same_entry:
					# write current uniq entry
					if len(row_curr) > 1:
						row_new = [col_uniq_m] + row_curr
						_stat_row_uniq += 1
					else:
						row_new = [col_invl_m] + row_curr
						_stat_row_invl += 1
					_stat_row_total += 1

					addrs_new.writerow(row_new)
					if v_level:	print_row_utf8(row_new)
					row_curr = row_next
					continue

				# fetch all consecutive rows sharing the same key column
				_stat_dupl_keywords += 1
				row = [row_curr, row_next]
				num_dup = 2
				while True:
					row_next = next(addrs, None)
					# BUG FIX: short rows are no longer counted as invalid
					# here; the classify loop below already counts them when
					# it writes them, so the old per-fetch count was a
					# double count.
					is_same_entry = (cmp(row_curr[col_uniq_n], row_next[col_uniq_n]) == 0) if row_next else False
					if is_same_entry:
						row.append(row_next)
						num_dup += 1
						continue
					else:
						row_curr = row_next
						break

				# classify and write every member of the group
				for i in range(num_dup):
					# BUG FIX: a row too short to hold the fuzz column used
					# to raise IndexError before it could be marked invalid.
					if len(row[i]) <= col_fuzz_n:
						row_new = [col_invl_m] + row[i]
						_stat_row_invl += 1
						_stat_row_total += 1
						addrs_new.writerow(row_new)
						if v_level:	print_row_utf8(row_new)
						continue

					match_context = None
					col_u = str_remove_ignored_word(row[i][col_fuzz_n], fuzz_ignore_word_list).decode('utf-8')
					for j in range(i+1, num_dup):
						if len(row[j]) <= col_fuzz_n:
							continue
						col_next_u = str_remove_ignored_word(row[j][col_fuzz_n], fuzz_ignore_word_list).decode('utf-8')
						match_context = difflib.get_close_matches(col_u, [col_next_u], 1, match_cutoff)
						if match_context:
							break
					if match_context:
						# close match later in the group -> duplicate row
						row_new = [col_dupl_m] + row[i]
						_stat_row_dupl += 1
					else:
						# no close match -> unique row
						# BUG FIX: total/uniq used to be incremented a second
						# time after writerow here, inflating the statistics.
						row_new = [col_uniq_m] + row[i]
						_stat_row_uniq += 1
					_stat_row_total += 1

					addrs_new.writerow(row_new)
					if v_level:	print_row_utf8(row_new)

	if v_level:
		print '-' * 80
	print '          ouput file :', fo_name
	print '           total row :', _stat_row_total,
	print '    (uniq=' + str(_stat_row_uniq), ' dupl=' + str(_stat_row_dupl) + ' invl=' + str(_stat_row_invl) + ')'
	print ' duplicated keywords :', _stat_dupl_keywords 
	print '=' * 80
	if _stat_row_invl > 1:
		print 'Warning: invalid rows detected, are you using correct delimiter for input file?'


if __name__ == "__main__":
	main()
