#!/usr/bin/env python
# -*- coding:utf-8 -*-

import nltk
import re 
from nltk.corpus import brown

# 3.2 字符串: 最底层的文本处理
"""
一个文件的内容是由一个叫做字符串的基本数据类型来表示的.
加法和乘法运算仅适用于数字和字符串.
请注意,不能对字符串用减法或除法.
"""

# 1.字符串基本操作
# 可以使用单引号或双引号来指定字符串. 当一个字符串中包含一个单引号,必须在单引号前加反斜杠.
"""
monty = 'Monty Python'
print(monty)
circus = "Monty Python's Flying Circus"
print(circus)
circus = 'Monty Python\'s Flying Circus'
print(circus)
"""

# 一个包含两个字符串的序列被连接为一个字符串. 反斜杠可以换行. 使用反斜杠或者括号
"""
couplet = "Shall I compare thee to a Summer's day?" \
    "Thou are more lovely and more temperate:"
print(couplet)
couplet = ("Rough winds do shake the darling buds of May,"
        "And Summer's lease hath all too short a date.")
print(couplet)        
"""

# 使用三引号来将一段话换行.
# Live example: a triple-quoted string literal may span multiple lines.
couplet = """Shall I compare thee to a Summer's day?
    Thou are more lovely and more temperate."""
# The triple-quoted block below is commented-out example code (a no-op
# string statement), not a docstring; it is kept byte-for-byte as found.
"""
print(couplet)
couplet = '''Rough winds do shake the darling buds of May,
        And Summer's lease hath  all too short a date.'''
print(couplet)
"""

# 连接: 将两个原始字符串首尾相连粘贴复制而成.
"""
print('very' + 'very' + 'very')
print('very' * 3)

a = [1,2,3,4,5,6,7,6,5,4,3,2,1]
b = [' ' * 2 * (7 - i) + 'very' * i for i in a]
for line in b:
    print(b)
"""

"""
print('very' - 'y')
print('very' / 2)
说明:减法操作不适用于对象类型str
TypeError: unsupported operand type(s) for -: 'str' and 'str'

说明: 除法的两个操作数不能分别为str和int.
TypeError: unsupported operand type(s) for /: 'str' and 'int'
"""




# 2. 输出字符串
# print语句可以多种方式显示多行.
"""
grail = 'Holy Grail'
monty = 'Monty Python'
print(monty + grail)
print('=='*30)
print(monty,grail)
print(monty,"and the", grail)
"""

# 3. 访问单个字符
# 字符串: 索引从0开始.
"""
grail = 'Holy Grail'
monty = 'Monty Python'
print(monty[0])
print(monty[3])
print(monty[6])

# 字符串的负数索引.
# for循环,遍历字符串的字符.
sent = 'colorless green ideas sleep furiously'
for char in sent:
    print(char)

# 计数单个字符,将所有字符小写来忽略大小写的区分,并过滤掉非字母字符.
import nltk
from nltk.corpus import gutenberg
raw = gutenberg.raw('melville-moby_dick.txt')
fdist = nltk.FreqDist(ch.lower() for ch in raw if ch.isalpha())
fdist.keys()
"""


# 访问子字符串
"""
monty = 'Monty Python'
print(monty[6:10])
# 正数索引,切片开始与第一个索引,但结束于最后一个索引的前一个.
# 负数索引切片: 从第一个索引开始到最后一个索引的前一个结束.
print(monty[-12:-7])
print('='*20)
print(monty[:5])
print(monty[6:])
# 使用in操作符测试一个字符串是否包含一个特定的子字符串
phrase = 'And now for something completely different'
if 'thing' in phrase:
    print('found "thing"')

print('-'*30)
# 使用find()找到一个子字符串在字符串内的位置.
print(monty.find('Python'))
"""



# 链表与字符串的差异:
# 字符串和链表都是序列,可以通过索引抽取或切片,可使用连接将它们合并在一起. 
# 字符串和链表之间不能连接.
"""
query = 'Who knows?'
beatles = ['John','Paul','George','Ringo']
print(query[2])
print('-'*20)
print(beatles[2])
print('-='*20)
print(query[:2])
print(beatles[:2])
print('=-'*30)
print(query + " I don't")
print(beatles + 'Brian')
print(beatles + ['Brian'])
"""

# 链表优点是可以灵活的决定它包含的元素.
"""
beatles = ['John','Paul','George','Ringo']
beatles[0] = "John Lennon"
print(beatles[-1])
del beatles[-1]
print(beatles)
"""
# 字符串不可变。链表可变,其内容可以随时修改,链表支持修改原始值的操作.




# 3.3 使用Unicode进行文字处理
"""
import codecs
path = nltk.data.find('corpora/unicode_samples/polish-lat2.txt')
f = codecs.open(path,encoding='latin2')
for line in f:
    line = line.strip()
    print(line.encode('unicode_escape'))

# 使用ord()查找一个字符的整数序数.
# unicodedata模块检查Unicode字符的属性.
import unicodedata
lines = codecs.open(path,encoding='latin2').readlines()
line = lines[2]
print(line.encode('unicode_escape'))

for c in line:
    if ord(c) > 127:
        print('%r U+%04x %s' % (c.encode('utf8'),ord(c),unicodedata.name(c)))

print('=='*20)
line.find(u'zosta\u0142y')
line = line.lower()
print(line.encode('unicode_escape'))
print('-=-='*30)
import re
m = re.search(u'\u015b\w*',line)
print(m.group())
# NLTK分词器允许Unicode字符串作为输入,并输出相应地Unicode字符串.
nltk.word_tokenize(line)
"""



# Using a local (non-Unicode) encoding in Python.
# import re 
# NOTE(review): the '$' characters below look like mojibake for the Polish
# letters ś/Ś (Latin-2 text decoded with the wrong codec) — the string is
# runtime data, so it is preserved byte-for-byte as found.
sent = """
    Przewiezione przez Niemcow pod koniec II wojny $wiatowej na
    Dolny $lask, zostaly odnalezione po 1945 r. na terytorium Polski.
"""
# Disabled demo: lower-casing / encoding, then substituting the accented
# letter. NOTE(review): re.compile('$|$') carries the same mojibake; as
# written it would match end-of-string, not the accented character.
"""
# u = sent.decode('utf8')
sent.lower()
print(sent.encode('utf8'))

SACUTE = re.compile('$|$')
replaced = re.sub(SACUTE,'[sacute]',sent)
print(replaced)
"""




# 3.4 使用正则表达式检测词组搭配
# 使用词汇语料库,对它进行预处理消除某些名称
"""
import re 
wordlist = [w for w in nltk.corpus.words.words('en') if w.islower()]
print(wordlist)
print('=='*20)

# 使用基本的元字符
# 函数re.search(p,s)检查字符串s中是否有模式p.
print([w for w in wordlist if re.search('ed$',w)])

# 通配符: "." 匹配任何单个字符.
print([w for w in wordlist if re.search('^..j..t..$',w)])
# 符号: ^ 匹配字符串的开始.
# 符号: $ 匹配字符串的结尾.
"""



# 范围与闭包
"""
wordlist = [w for w in nltk.corpus.words.words('en') if w.islower()]

print([w for w in wordlist if re.search('^[ghi][mno][jlk][def]$',w)])
# + 符号,可适用于单个字母或括号内的字母集.

chat_words = sorted(set(w for w in nltk.corpus.nps_chat.words()))
print([w for w in chat_words if re.search('^m+i+n+e+$',w)])
print('-='*20)
print([w for w in chat_words if re.search('^[ha]+$',w)])


# 用来寻找匹配特定模式的词汇标识符
wsj = sorted(set(nltk.corpus.treebank.words()))
print([w for w in wsj if re.search('^[0-9]+\.[0-9]+$',w)])
print('='*20)
print([w for w in wsj if re.search('^[A-Z]+\$$',w)])
print('-='*20)
print([w for w in wsj if re.search('^[0-9]{4}$',w)])
print('=--'*20)
print([w for w in wsj if re.search('^[0-9]+-[a-z]{3,5}$',w)])
print('+'*20)
print([w for w in wsj if re.search('^[a-z]{5,}-[a-z]{2,3}-[a-z]{,6}$',w)])
print('+='*20)
print([w for w in wsj if re.search('(ed|ing)$',w)])
"""





# 3.5 正则表达式的有益应用

# 提取字符块
# re.findall() 方法找出所有匹配指定正则表达式的.
"""
word = 'supercalifragilistic expia lidoc ious' 
print(re.findall(r'[aeiou]',word))
print(len(re.findall(r'[aeiou]',word)))

wsj = sorted(set(nltk.corpus.treebank.words()))
fd = nltk.FreqDist(vs for word in wsj for vs in re.findall(r'[aeiou]{2,}',word))
print(fd.items())
print('-='*20)
"""

# 在字符块上做更多事情
# 用re.findall()从词中提取字符块. 
# 使用re.findall()提取所有匹配的词中的字符,然后使用''.join()将它们连接在一起.
"""
regexp = r'^[AEIOUaeiou]+|[AEIOUaeiou]+$|[^AEIOUaeiou]'
def compress(word):
    pieces = re.findall(regexp,word)
    return ''.join(pieces)

english_udhr = nltk.corpus.udhr.words('English-Latin1')
print(nltk.tokenwrap(compress(w) for w in english_udhr[:75]))

print('=--'*20)
# 将从罗托卡特语词汇中提取所有辅音-元音序列.
rotokas_words = nltk.corpus.toolbox.words('rotokas.dic')
cvs = [cv for w in rotokas_words for cv in re.findall(r'[ptksvr][aeiou]',w)]
cfd = nltk.ConditionalFreqDist(cvs)
print(cfd.tabulate())


# 检查表格中数字背后的词汇,
cv_word_pairs = [(cv,w) for w in rotokas_words
    for cv in re.findall(r'[ptksvr][aeiou]',w)]
cv_index = nltk.Index(cv_word_pairs)
print(cv_index['su'])
print(cv_index['po'])
"""



# 查找词干
"""
print(re.findall(r'^.*(ing|ly|ed|ious|ies|ive|es|s|ment)$','processing'))
print(re.findall(r'^.*(?:ing|ly|ed|ious|ies|ive|es|s|ment)$','processing'))
# 将词分成词干和后缀,用括号括起正则表达式的这两部分.
print(re.findall(r'^(.*)(ing|ly|ed|ious|ies|ive|es|s|ment)$','processing'))

print(re.findall(r'^(.*)(ing|ly|ed|ious|ies|ive|es|s|ment)$','processes'))
# * 操作符是贪婪的.  .* 尽可能多的匹配输入的字符串.
# 非贪婪: *?
print(re.findall(r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment)$','processes'))
# 使第二个括号中的内容变成可选,来得到空后缀:
print(re.findall(r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment)?$','language'))

# 定义一个函数来获取词干,并将它应用到整个文本
def stem(word):
    regexp = r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment)?$'
    stem,suffix = re.findall(regexp,word)[0]
    return stem
"""

# Sample text (Monty Python) for the disabled stem() demo below.
# NOTE(review): "goverment" is a typo carried in the sample data; it is a
# runtime string, so it is preserved as-is.
raw = """DENNIS: Listen, strange women lying in ponds distributing swords
    is no basis for a system of goverment. Supreme executive power derives 
    from a mandate from the masses, not from some farcical aquatic ceremony."""
# Disabled demo: tokenize the text and apply the stem() function defined
# in the commented block above (L323-326).
"""
tokens = nltk.word_tokenize(raw)
print([stem(t) for t in tokens])
"""



# 搜索已分词文本
# <.*> 将匹配所有单个标识符,将它括在括号里.只匹配词,而不匹配短语.
"""
from nltk.corpus import gutenberg,nps_chat

moby = nltk.Text(gutenberg.words('melville-moby_dick.txt'))
print(moby.findall(r"<a> (<.*>) <man>"))

print('-='*20)
chat = nltk.Text(nps_chat.words())
print(chat.findall(r"<.*> <.*> <bro>"))
print(chat.findall(r"<1.*>{3,}"))
"""


"""
from nltk.corpus import brown
hobbies_learned = nltk.Text(brown.words(categories=['hobbies','learned']))
print(hobbies_learned.findall(r"<\w*> <and> <other> <\w*s>"))
"""


# 3.6 规范化文本
# 处理文本词汇前要将文本转换为小写: set(w.lower() for w in text)
# 使用lower()将文本规范化为小写.
# Same Monty Python sample, re-assigned for the tokenization demo below.
raw = """DENNIS: Listen, strange women lying in ponds distributing swords
    is no basis for a system of goverment. Supreme executive power derives 
    from a mandate from the masses, not from some farcical aquatic ceremony."""
# Disabled demo: word-tokenize the raw text and print the token list.
"""
tokens = nltk.word_tokenize(raw)
print(tokens)
"""




# 词干提取器
# Porter和Lancaster词干提取器按照自己的规则剥离词缀.
# Porter词干提取器可索引一些文本和使搜索支持不同词汇形式的话.
"""
porter = nltk.PorterStemmer()
lancaster = nltk.LancasterStemmer()
print([porter.stem(t) for t in tokens])
"""

# 使用词干提取器索引文本
"""
class IndexedText(object):
    def __init__(self,stemmer,text):
        self._text = text 
        self._stemmer = stemmer
        self._index = nltk.Index((self._stem(word),i)
                for (i,word) in enumerate(text))

    def concordance(self,word,width=40):
        key = self._stem(word)
        wc = width/4
        for i in self._index[key]:
            lcontext = ''.join(self._text[i - wc:i])
            rcontext = ' '.join(self._text[i:i + wc])
            ldisplay = '%*s' %(width,lcontext[-width:])
            rdisplay = '%-*s'%(width,rcontext[:width])
            print(ldisplay,rdisplay)
    
    def _stem(self,word):
        return self._stemmer.stem(word).lower()


porter = nltk.PorterStemmer()
grail = nltk.corpus.webtext.words('grail.txt')
text = IndexedText(porter,grail)
print(text.concordance('lie'))

报错:
TypeError: slice indices must be integers or None or have an __index__ method
"""



# 词形归并
# WordNet词形归并器删除词缀产生的词都是在它字典中的词.
# Same Monty Python sample, re-assigned for the WordNet lemmatizer demo.
raw = """DENNIS: Listen, strange women lying in ponds distributing swords
    is no basis for a system of goverment. Supreme executive power derives 
    from a mandate from the masses, not from some farcical aquatic ceremony."""

# Disabled demo: lemmatize each token with nltk.WordNetLemmatizer().
"""
tokens = nltk.word_tokenize(raw)
wnl = nltk.WordNetLemmatizer()
print([wnl.lemmatize(t) for t in tokens])
"""






# 3.7 用正则表达式为文本分词
# 分词是将字符串切割成可识别的构成一块语言数据的语言单元.

# 分词的简单方法:
# 简单方法是: 在空格符处分割文本.
# Sample text (from "Alice in Wonderland") for the tokenization examples below.
raw = """When I'M a Duchess,' she said to herself, (not in a very hopeful tone though),
'I won't have any pepper in my kitchen AT ALL. Soup does very
well without--Maybe it's always pepper that makes people hot-tempered,'..."""


# Split the raw text on whitespace with re.split() (cf. raw.split()).
# print(re.split(r' ',raw))
# print(re.split(r'[ \t\n]+',raw))

"""
正则表达式[ \t\n]+ 匹配一个或多个空格,制表符或换行符.
    \s 它表示匹配所有空白字符.

在正则表达式前加字母:"r", 它告诉Python解释器按照字面表示对待字符串而不去处理正则表达式中包含的反斜杠字符.

\W 所有字母,数字和下划线以外的字符. 
用\W来分割所有单词字符以外的输入.
"""
# print(re.split(r'\W+',raw))


"""
在开始和结尾给了一个空字符串.
通过re.findall(r'\w+',raw) 使用模式匹配词汇. 
得到相同的标识符,但没有空字符串.

正则表达式\w+|\S\w* 将首先尝试匹配词中字符的所有序列.
"""

# print(re.findall(r'\w+|\S\w*',raw))

"""
允许连字符和撇号: \w+([-']\w+)*, 
这个表达式表示"\w+" 后面跟零个或更多"[-']\w+" 的实例.
    它会匹配hot-tempered和it's.
    添加一个模式来匹配引号字符让它们与它们包括的文字分开.

"""

# print(re.findall(r"\w+(?:[-']\w+)*|'|[-.(]+|\S\w*",raw))




# NLTK的正则表达式分词器
# nltk.regexp_tokenize()分词效率更高.



# 3.8 分割
# 分词是一个更普遍的分割问题的一个实例.

# 断句
# 计算布朗语料库中每个句子的平均词数
"""
print(len(nltk.corpus.brown.words())/ len(nltk.corpus.brown.sents()))

sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
text = nltk.corpus.gutenberg.raw('chesterton-thursday.txt')
sents = sent_tokenizer.tokenize(text)
print(sents[171:181])
"""




# 从分词表示字符串seg1和seg2中重建文本分词,
# seg1和seg2表示假设的一些儿童讲话的初始和最终分词.
# 函数segment()可以使用它们重现分词的文本
"""
def segment(text,segs):
    words = []
    last = 0
    for i in range(len(segs)):
        if segs[i] == '1':
            words.append(text[last:i+1])
            last = i+1
    words.append(text[last:])
    return words

text = "doyouseethekittyseethedoggydoyoulikethekittylikethedoggy"
seg1 = "0000000000000001000000000010000000000000000100000000000"
seg2 = "0100100100100001001001000010100100010010000100010010000"
print(segment(text,seg1))
print(segment(text,seg2))
"""



"""
计算目标函数: 给定一个假设的源文本的分词,推导出一个词典和推导表,
它能源文本重构,然后合计每个词项(包括边界标志)与推导表的字符数,
作为分词质量的得分；得分值越小表明分词越好.
"""
# 计算存储词典和重构源文本的成本
"""
def evaluate(text,segs):
    words = segment(text,segs)
    text_size = len(words)
    lexicon_size = len(' '.join(list(set(words))))
    return text_size + lexicon_size


text = "doyouseethekittyseethedoggydoyoulikethekittylikethedoggy"
seg1 = "0000000000000001000000000010000000000000000100000000000"
seg2 = "0100100100100001001001000010100100010010000100010010000"
seg3 = "0000100100000011001000000110000100010000001100010000001"

print(segment(text,seg3))
print(evaluate(text,seg3))
print(evaluate(text,seg2))
print(evaluate(text,seg1))
# 最后一步是寻找最大化目标函数值的0和1的模式.
"""


"""
使用模拟退火算法的非确定性搜索: 一开始仅搜索短语分词; 随机扰动0和1,
它们与"温度" 成比例, 每次迭代温度都会降低,扰动边界会减少.
"""
"""
from random import randint
def flip(segs,pos):
    return segs[:pos] + str(1-int(segs[pos])) + segs[pos+1:]

def flip_n(segs,n):
    for i in range(n):
        segs = flip(segs,randint(0,len(segs)-1))
    return segs


def anneal(text,segs,iterations,cooling_rate):
    temperature = float(len(segs))
    while temperature > 0.5:
        best_segs, best = segs,evaluate(text,segs)
        for i in range(iterations):
            guess = flip_n(segs,int(round(temperature)))
            score = evaluate(text,guess)
            if score < best:
                best, best_segs = score,guess
        
        score,segs = best,best_segs
        temperature = temperature / cooling_rate
        print(evaluate(text,segs),segment(text,segs))
    print 
    return segs 

text = "doyouseethekittyseethedoggydoyoulikethekittylikethedoggy"
seg1 = "0000000000000001000000000010000000000000000100000000000"
print(anneal(text,seg1,5000,1.2))
# 以一个合理的准确度自动将文本分割成词汇,这种方法可用于那些词的边界没有任何视觉表示的书写系统分词.
"""




# 3.9 格式化:从链表到字符串
# 文本处理的最简单的一种结构化对象是词链表.
# 输出到文件时,需要把这些词的链表转换成字符串.
"""
silly = ['We','called','him','Tortoise','because','he','taught','us','.']
print(' '.join(silly)) # 取出silly中的所有项目,将它们连接到一个大的字符串.
print(';'.join(silly))
print(''.join(silly))
"""
"""
' '.join(silly)) 
取出silly中的所有项目,将它们连接到一个大的字符串,
使用' '作为项目之间的间隔符. 
即join()是一个你想要用来作为胶水的字符串的一个方法.
join()方法只适用于一个字符串的链表.
"""





# 字符串与格式
"""
word = 'cat'
sentence = "my name is Jacky"
print(word)
print(sentence)

# 格式化输出包含变量和预先指定的字符串的一个组合.
fdist = nltk.FreqDist(['dog','cat','dog','cat','dog','snake','dog','cat'])
for word in fdist:
    print(word, '->', fdist[word],';',)

# 使用字符串格式化表达式
for word in fdist:
    # print('%s -> %d ;' % (word,fdist[word]),)
    print( '{} => {} ;'.format(word,fdist[word]),)


print('I want a %s right now' % 'coffee')

# for循环
template = 'Lee wants a {} right now'
menu = ['sandwich','spam fritter','pancake']
for snack in menu:
    print( template.format(snack))
"""




# 排列
"""
print('%6s' % 'dog')
print('%-6s' % 'dog')
width = 6
print('%-*s'%(width,'dog'))

count, total = 3205, 9375
print("accuracy for %d words: %2.4f%%" % (total, 100 * count / total))



# 布朗语料库的不同部分的频率模型
def tabulate(cfdist,words,categories):
    print('%-16s' % 'Category',)
    for word in words:
        print('%6s' % word,)
    print

    for category in categories:
        print('%-16s' % category,)
        for word in words:
            print('%6d' % cfdist[category][word],)
        print 


cfd = nltk.ConditionalFreqDist(
    (genre, word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))

genres = ['news','religion','hobbies','science_fiction','romance','humor']
modals = ['can','could','may','might','must','will']
print(tabulate(cfd, modals,genres),end='\t')

print('%*s' % (15, "Monty Python"))

# 自动定制列的宽度
words = "my name is jacky"
width = max(len(w) for w in words)
print(width)
"""






# 将结果写入文件
"""
output_file = open('F:\\PythonProject\\AI\\NLP\\自然语言处理笔记\\scripts\\output.txt','w')
words = set(nltk.corpus.genesis.words('english-kjv.txt'))
for word in sorted(words):
    output_file.write(word + "\n")
    
# 当将非文本数据写入文件时,先将它转换为字符串.
print(len(words))
print(str(len(words)))
output_file.write(str(len(words)) + "\n")
output_file.close()
"""




# 文本换行
# Text wrapping: annotate each word with its length, join the pieces into one
# long line, then wrap it to the default 70-column width with textwrap.fill().
from textwrap import fill

saying = ['After', 'all', 'is', 'said', 'and', 'done', ',',
          'more', 'is', 'said', 'than', 'done', '.']
# FIX: the original named this variable `format`, shadowing the builtin
# format(); renamed to `template` so the builtin stays usable.
template = '%s_(%d),'
pieces = [template % (word, len(word)) for word in saying]
output = ' '.join(pieces)
wrapped = fill(output)  # breaks only at spaces, so no piece is split
print(wrapped)