#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import codecs, sys, os, re, pickle
from struct import *
from time import time, strftime, gmtime, sleep

class progressbarClass:
    """Minimal console progress bar: prints one glyph per 2% of progress."""

    def __init__(self, finalcount, progresschar=None):
        # finalcount maps to 100%; a falsy finalcount means "always complete".
        self.finalcount = finalcount
        self.blockcount = 0
        # Default glyph is chr(178) (a shaded block in latin-1 / CP437 terminals).
        self.block = progresschar if progresschar else chr(178)
        self.f = sys.stdout

    def progress(self, count):
        """Advance the bar to `count` (clamped to finalcount), emitting any
        newly-earned block characters; prints a newline at 100%."""
        count = min(count, self.finalcount)
        if self.finalcount:
            percentcomplete = int(round(100 * count / self.finalcount))
            percentcomplete = max(percentcomplete, 1)
        else:
            percentcomplete = 100

        blockcount = int(percentcomplete / 2)
        # Emit only the blocks not yet printed (empty range if none are new).
        for _ in range(self.blockcount, blockcount):
            self.f.write(self.block)
            self.f.flush()

        if percentcomplete == 100:
            self.f.write("\n")
        self.blockcount = blockcount

class TextParser:
	'''
	Merge every file in a folder into one corpus.

	Produces the forward and the backward (character-reversed) full-text
	files, plus an offset-range index recording where each source file
	starts inside the merged text.

	location: directory where the results (and indexes) are stored.
	'''
	
	def __init__(self, location):
		if location[-1] == '/':									# normalise the argument (for os.path.split(), or location+'/...')
			location = location[:-1]
		self.location = location								# directory where results (and indexes) are stored

	def __saveSectionInf( self, ref, offset ):
		'''
		Append one lb (line/section) index record: a fixed 17-byte
		reference name plus the 4-byte offset where that section starts.
		fw.write( pack('17sI', str(ref), f.tell()-2) )
		'''
		self.fwl.write( pack('17sI', str(ref), offset) )		#self.fwl.write( pack('%dsI' % self.slen, str(ref), offset) )
		return

	def getFnLen(self, texts):
		'''
		Return the length of the longest file-name stem in *texts*.
		'''
		nlen = 0
		for k in texts:
			fn = os.path.split(k)[1]
			fn = fn.split('.')[0]
			if len(fn) > nlen:
				nlen = len(fn)
		return nlen

	def breakTags(self, text):
		'''
		Drop the original newlines, then turn the line-breaking tags into
		newlines.  Breaking tags: p, lg, l, title, byline, docNumber, head.
		Every other tag is removed.
		'''
		text = text.replace('\n', '')
		text = re.sub(r'<[ptbdh][ iyoe][^>]+>', '\n', text)
		text = re.sub(r'<l>|<lg>|<p>', '\n', text)
		text = re.sub(r'</[^>]+>', '\n', text)
		text = re.sub(r'\n+', '\n', text)						# collapse runs of newlines into a single one
		text = re.sub(r'<.+?>', '', text)						# strip any remaining tags
		return text
		
	def tackoutPunctuation(self, text):
		'''
		Remove Chinese punctuation (used when searching un-punctuated text).
		'''
		return re.sub(ur'[。，、；：「」『』（）？！—…《》〈〉．“”‧﹄﹂]', '', text)

	def parseTexts(self, texts, opt='', pun=''):
		'''
		Merge the folder's files and write the forward and backward
		full-text files (plain text + newlines).
		
		texts = [list of file names]
		
		opt = '':	leave the source contents untouched
		opt = 'bk':	strip newlines and re-break on the built-in tags.
					Breaking tags (TEI.5): p, lg, l, title, byline,
					docNumber, head, div (CBETA files have no <div>).
		
		pun = '':	leave punctuation untouched
		pun = 'on':	remove punctuation
		'''
		if opt == 'bk':
			print '** parsing breakline tags **'
		if pun == 'on':
			print '** corpus no punctuation **'

		# use a fixed width of 17 for now instead of the computed stem length
		#self.slen = self.getFnLen(texts)							# longest file-name stem, used when recording lb
		
		fwf = codecs.open(self.location+'/Text_f', 'w', 'utf16')	# forward plain-text file
		self.fwl = open(self.location+'/Text_f_lb', 'wb')			# lb (line-number) index
		
		os.mkdir( self.location+'/bktmpf' )							# scratch area for the backward file
		
		offset = 2													# running byte offset (2 = the UTF-16 BOM)
		
		# parse every file: append its text and handle the markup
		fct = 0
		for k in texts:
			fct += 1
			if fct%300 == 0:
#				print fct, '/', len(texts), 'parsed.'	//log
#				sys.stdout.write('Percent complete: 0%--------')
				print ""
				
			bkn = os.path.split(k)
			f_tmp = codecs.open(self.location+'/bktmpf/'+bkn[1], 'w', 'utf16')		# per-file scratch copy, reversed later for the backward file
			lbref = bkn[1].split('.')[0]
			self.__saveSectionInf( lbref.strip(), offset )							# record this section's starting offset
			
			f_tmp.write('\n')
			fwf.write('\n')
			offset += 2
			
			f = codecs.open(k, 'r', 'utf8')							# assumed input encoding (could auto-detect instead)
			l = f.read()
			f.close()
			
			if opt == 'bk':											# handle '\n' and the line-breaking tags
				l = self.breakTags(l)
			if pun == 'on':											# optionally strip punctuation
				l = self.tackoutPunctuation(l)
			
			flag = 'off'											# tag bookkeeping flag (set but never read in this loop)
			pre = '\n'												# the character preceding the one being processed
			for w in l:												# per-character loop: ~4x faster than binary file comparison
				if w == '\n' and pre == '\n':						# consecutive newlines can still appear after lb handling,
					continue										# ex: \n(</title>)<lb.../>\n(<doc>)
			
				if ord(w) >= 0x20000:								# CJK Ext.B starts at U+20000: 4 bytes in UTF-16.
					length = 4										# NOTE(review): assumes a wide (UCS-4) Python 2 build;
				else:												# narrow builds iterate Ext.B chars as surrogate halves.
					length = 2
			
				f_tmp.write(w)
				fwf.write(w)
				offset += length
				pre = w

			f_tmp.write('\n')										# bracket every file with a newline at head and tail
			fwf.write('\n')
			offset += 2

			f_tmp.close()
#		print fct, '/', len(texts), 'parsed.'		//log
		sys.stdout.write('\nLoading (please wait):\n0%--------')
		pb=progressbarClass(8,"-")
		count=0
		while count<9:
			count+=1
			pb.progress(count)
			sleep(0.3)
		
		fwf.close()
		self.fwl.close()
		
#		print 'Creating backword fulltaxt ...'		//log
		sys.stdout.write('20%-------')
		self.__getBackwardFulltext(self.location+'/bktmpf', 'Text_b')
		pb=progressbarClass(8,"-")
		count=0
		while count<9:
			count+=1
			pb.progress(count)
			sleep(0.3)

#		print 'Getting update lb subindex ...'		//log
		sys.stdout.write('40%-------')
		pb=progressbarClass(8,"-")
		count=0
		while count<9:
			count+=1
			pb.progress(count)
			sleep(1)

		self.__completeLBindex(self.location+'/Text_f_lb', offset, 'Text_lb')

		# returns ( forward file name, backward file name, total size (offset), lb index file name )
		return self.location+'/Text_f', self.location+'/Text_b', offset, self.location+'/Text_lb'			
		
	def __getBackwardFulltext(self, folder, fn):
		'''
		Merge all per-file scratch copies (in reverse file order, each one
		character-reversed) into the backward full-text file.
		folder = scratch folder holding the per-file copies
		fn = name of the new (backward) file
		'''
		L = getFiles(folder)
		L.sort()
		L.reverse()
		
		fwb = codecs.open(self.location+'/'+fn, 'w', 'utf16')	# backward full-text file
		for k in L:
			f_tmp = codecs.open(k, 'r', 'utf16')
			l_tmp = f_tmp.read()
			f_tmp.close()
			os.remove(k)
			
			tmp_bk = list( l_tmp )				# file string -> character list
			tmp_bk.reverse()					# reverse the characters
			tmp_bk = ''.join(tmp_bk)			# join back into a string
			fwb.write( tmp_bk )

		fwb.close()
		os.rmdir(folder)
		return 'backward fulltext %s ok.' % fn

	def __completeLBindex( self, FWsubindex, totalofst, fn ):
		'''
		Use the forward-file lb subindex to derive the backward-file
		positions, and pickle the complete line-number index.
		FWsubindex = forward-file lb index file
		totalofst = total offset (size in bytes) of the forward file
		fn = name of the new index file
		'''
		flb = open(self.location+'/'+fn, 'wb')
		data1 = {}
		f = open(FWsubindex, 'rb')
		while f.tell() < os.stat( FWsubindex )[6]:
			tmplb = unpack( "17sI", f.read(24) )		# 24 = calcsize('17sI') with native alignment
			bkofst = totalofst - tmplb[1]				# mirror position in the backward file
#			flb.write( pack('17sII', tmplb[0], tmplb[1], bkofst) )
			data1[tmplb[0].replace('\x00', '')] = (tmplb[1], bkofst)
		f.close()
		pickle.dump(data1, flb)
		flb.close()
		os.remove(FWsubindex)
		return 'backward subindex %s ok.' % fn

def getFiles( folder ):
	'''
	Recursively list every file under *folder*.

	Directories are walked depth-first; the result is a sorted list of
	full '/'-joined path strings.
	'''
	if folder[-1] != '/':
		folder = folder + '/'
	collected = []
	for name in os.listdir(folder):
		full = folder + name
		if os.path.isdir(full):
			collected.extend(getFiles(full + '/'))
		else:
			collected.append(full)
	return sorted(collected)

def Parser(allfolder):
	'''
	Pre-parse the text files in a folder: build the forward/backward
	corpus files, the lb link index, and both suffix arrays.
	'''
	
	folder = [allfolder]
	tag = ''	#'bk'
	pun = ''	#'on'

	for tk in folder:

		folder = tk
#		print folder, 'start ----------------------'  //log

		try:
			L = getFiles(folder)
			L.sort()
		except:
			print "Warning: a 'path' within files needed!"
			continue
			sys.exit()		# NOTE(review): unreachable -- the `continue` above always runs first
	
		ifolder = './Parsed_Text/'+folder.lower()		# directory that will hold the index files
		if os.path.isdir( ifolder ):
			print " Warning:\n files in '" + ifolder + "' have been indexed!\n Do you want to \"Re-index\" any previously index files."
			xinput = raw_input(" [Y/n] ")
			if xinput == "n":
				break
		else:
			os.makedirs( ifolder )
		
		Ts = time()
		obj = TextParser(ifolder)						# builds the merged forward/backward text and the offset index
		n = obj.parseTexts(L, tag, pun)
#		print n		//log										# n = ( forward name, backward name, size, lb index name )
		
		Te = time()
#		print strftime('%H:%M:%S', gmtime(Te-Ts))			

		Tsa1 = time()
		
		objS = Index(n[0])								# build the forward file's Suffix Array
		objS.create()
		sys.stdout.write('70%-------')
		pb=progressbarClass(8,"-")
		count=0
		while count<9:
			count+=1
			pb.progress(count)
			sleep(1)		
		
		objS = Index(n[1])								# build the backward file's Suffix Array
		objS.create()
		sys.stdout.write('90%-------')
		pb=progressbarClass(8,"-")
		count=0
		while count<9:
			count+=1
			pb.progress(count)
			sleep(0.3)
		
		Tsa2 = time()
#		print strftime('%H:%M:%S', gmtime(Tsa2-Tsa1)) //log
		sys.stdout.write('OK Finished!!')

class Index:
	'''Build and search the Suffix Array of one UTF-16 text file.'''
	
	def __init__( self, fname ):
		'''
		[ fname ] = The main text file. 
		'''
		self.F = open( fname, 'rb' )			## open the main text file ##
		self.F_len = os.stat( fname )[6]		## size of the main text file in bytes (st_size)
		self.File_main = fname					## name of the main file ##
		
	def __getWords( self, ofst, length ):
		'''
		Given a byte offset and a character count, return the string of
		that length reading forward (rightward) from the offset.
		'''
		line = ''
		self.F.seek( ofst )
		bytes = (length + length/2) * 2		# byte count: read half again as many chars to cover 4-byte ones
		try:
			line = self.F.read(bytes).decode('utf16')
		except:
			self.F.seek( ofst )
			line = self.F.read(bytes+2).decode('utf16')		# decode split a 4-byte char: retry with 2 extra bytes
		return line[:length]
		
	def __getsdroW( self, ofst, length ):
		'''
		Given a byte offset and a character count, return the string of
		that length reading backward (leftward) from the offset.
		'''
		line = ''
		for k in range(length):
			if ofst - 2 >= 2:			# not yet at the file head (offset 2 = just past the BOM)
				try:
					self.F.seek( ofst-2 )
					line = self.F.read(2).decode('utf16') + line
					ofst = ofst - 2
				except:
					self.F.seek( ofst-4 )			# 2 bytes did not decode: step back over a 4-byte char
					line = self.F.read(4).decode('utf16') + line
					ofst = ofst - 4
			else:
				break
#				line = u'　' + line
		return line

####### Create Suffix Array #######
	def create( self, cache=1000000 ):
		"""
		Create the suffix array of self.F.
		[ cache ] = No. of chars indexed before a sorted tmp file is flushed.
		"""
		
		Ts = time()		## start time ##
#		print 'indexing', self.File_main		//log
		
		p = os.path.split( self.File_main )		## split: p[0] = path, p[1] = file name ##
		if os.path.isdir( p[0]+"/tmp" ):		# ensure the tmp scratch folder exists
			pass
		else:
			os.mkdir( p[0]+"/tmp" )
		tmpath = p[0]+"/tmp/"					## full path of the scratch folder ##
		
		D = {}				## offset -> string scratch dict; sorted, flushed and cleared at each `cache` boundary ##
		c = n = 0			## c = running count of indexed chars, n = running count of sorted tmp files ##
		
		self.F.seek(2)		# skip the 2-byte BOM of the UTF-16 file
		while self.F.tell() < self.F_len:
			offset = self.F.tell()		## current offset in self.F ##
			try:
				w1 = self.F.read(2).decode('utf16')
				ofst = 2
			except:
				self.F.seek(offset)
				w1 = self.F.read(4).decode('utf16')		# 4-byte (surrogate-pair) character
				ofst = 4

			# unicode ranges that get indexed
			wrd = ord(w1)
			if wrd==0x0028 or wrd==0x0029:
				pass
			elif (wrd >= 0x0022 and wrd <= 0x2EFF) or\
				(wrd >= 0x3020 and wrd <= 0x312F) or\
				(wrd >= 0x3400 and wrd <= 0xFE4F) or\
				(wrd >= 0xFF10 and wrd <= 0xFF19) or\
				(wrd >= 0xFF5C and wrd <= 0xFF5E) or\
				(wrd >= 0x20000 and wrd <= 0x2A6DF) or\
				(wrd >= 0xF0000 and wrd <= 0xF30DF) or\
				wrd==0xFF06 or wrd==0xFF0A or wrd==0xFF0B or wrd==0xFF0D or\
				wrd==0xFF1D or wrd==0xFF5C or wrd==0xFF5D or wrd==0xFF5E:
				c += 1
				w2 = self.__getWords( offset+ofst, 127 )	# next 127 chars: the sort-key context for this suffix
				D[offset] = w1 + w2	
#				print "%d, [%d: %s]\n%s" % ( c, offset, w1, w2 )
#				raw_input()
				
			if (c > 0 and c % cache == 0 and len(D) > 0) or offset+ofst == self.F_len:	# flush every `cache` chars, or at end of file
				n += 1
				rst1 = self.__saveTmpFiles( D, n, tmpath )
				D = {}
				#print "%d / %d bytes, %d char. tmp %d saved %s." % ( offset, self.F_len, c, n, rst1 ) //log

			self.F.seek( offset + ofst )
			
		Tm = time()		## time after all sorted batches are flushed ##
#		print strftime('%H:%M:%S', gmtime(Tm-Ts))  #//log
		
		self.__combinTmps( tmpath, p[1] )	# merge the suffix-array tmp files
		Te = time()		## end time ##
#		print strftime('%H:%M:%S', gmtime(Te-Ts))  #//log
	
	def __saveTmpFiles( self, Dic, no, path ):
		"""Sort one batch of suffixes and save their offsets.
			[ Dic ] = {offset: 128 chi-char, ...} ex: {28: 'chi-string...', ...}
			[ no ] = No. of tmp files
			[ path ] = path of tmp files

			Only the offsets are written (ordered by their 128-char strings);
			the strings are re-read from self.F during the merge."""
		
		L = Dic.items()		## dict -> list of (offset, string) pairs ##
		L.sort( lambda x,y: cmp(x[1], y[1]) )		# sort by string (Python 2 cmp-style sort)
		fw = open( path+str(no), 'wb' )		## open the tmp file to write ##
		for k in L:
			fw.write( pack('I', k[0]) )
#			print "offset: %d, string: %s" % ( k[0], k[1][0])
#			raw_input()
		fw.close()
		return 'ok'
		
	def __combinTmps( self, path, orgfn ):
		"""Merge the sorted tmp files pairwise until a single file remains.
			[ orgfn ] = the file name of self.F
			[ path ] = folder of where all tmp files saved"""
		
		L = os.listdir( path )	## names of all remaining tmp files ##
#		print len(L)
#		print L
		if len(L) != 1:	# two or more files left: keep merging
#			print len(L)
#			raw_input()
			i = 0		## toggles between the first and second file of a pair ##
			for k in L:	# merge the files pairwise
				i += 1
				if i == 1:
					f1n = k		## remember the first file's name ##
				elif i == 2:
#					print f1n, k		//log
					f1 = open( path+f1n, "rb" )			## open the first file of the pair ##
					f2 = open( path+k, "rb" )			## open the second file of the pair ##
#					print "merge sorting", f1n, "and", k 	//log
	
					fw = open( path+"tmp", "wb" )				## merge this pair; the result goes to the 'tmp' file ##
					
					f1_ofst = unpack( 'I', f1.read(4) )[0]		## first file's next offset ##
					f1w = self.__getWords( f1_ofst, 128 )		## 128 chars at that offset (the sort key) ##
					f2_ofst = unpack( 'I', f2.read(4) )[0]		## second file's next offset ##
					f2w = self.__getWords( f2_ofst, 128 )		## 128 chars at that offset (the sort key) ##
	
					flag1 = flag2 = "on"						## flags: whether each input file still has records ##
					while flag1 == "on" or flag2 == "on":
						if flag2 == "off":
							fw.write( pack('I', f1_ofst) )		# second file exhausted: copy the rest of the first
							fw.write( f1.read() )
							break							
						elif flag1 == "off":
							fw.write( pack('I', f2_ofst) )		# first file exhausted: copy the rest of the second
							fw.write(f2.read())
							break
						elif f1w <= f2w:
#							if f1w == f2w:	# two equal 256-byte strings are worth checking: input may be duplicated
#								print "***ofst_1: %d, ofst_2: %d following same contents !! file may be duplicated." % ( f1_ofst, f2_ofst )
	
							fw.write( pack('I', f1_ofst) )	# f1 <= f2: emit the first file's offset
							try:	# unpack raising means this tmp file is exhausted
								f1_ofst = unpack( 'I', f1.read(4) )[0]
#								self.F.seek( f1_ofst )
								f1w = self.__getWords( f1_ofst, 128 )
							except:
								flag1 = "off"
						elif f1w > f2w:
							fw.write( pack('I', f2_ofst) )	# f1 > f2: emit the second file's offset
							try:
								f2_ofst = unpack( 'I', f2.read(4) )[0]
#								self.F.seek( f2_ofst )
								f2w = self.__getWords( f2_ofst, 128 )
							except:
								flag2 = "off"
					f1.close()
					f2.close()
					fw.close()
					
					os.remove( path+f1n )	# delete the first file after merging
					os.remove( path+k )	# delete the second file
					os.rename( path+"tmp", path+f1n )	# rename 'tmp' to the first file's name
					i = 0
#					print "check", f1n, k
#					raw_input()
			self.__combinTmps( path, orgfn )	# recurse until a single file remains
		else:
			self.F.close()
			path2 = path.replace( "tmp/", "" )
			os.rename( path+L[0], path2+orgfn+"_SA" )	# the last remaining file is the finished suffix array
			os.rmdir( path[:-1] )	# path ends with '/'; strip it so rmdir accepts it
#			print "%s, %s, suffix array created" % ( L[0], orgfn )  //log

def indexSA(fn, cache=1000000):
	'''
	Entry point: pre-process and index every text file under folder *fn*.

	[ fn ] = folder containing the source text files
	[ cache ] = NOTE(review): accepted but never used -- Parser() always
	calls Index.create() with its default cache size.
	'''
	Parser(fn)			# run the TextParser pre-processing + indexing pipeline
	return "ok"

