import numpy as np
import pandas as pd

# Build a one-hot word-vector matrix for a single sentence.
sentence = """Thomas Jefferson began building Monticello at the age of 26."""
token_sequence = sentence.split()  # idiomatic instance call instead of str.split(sentence)
vocab = sorted(set(token_sequence))  # sorted unique tokens fix the column order
num_tokens = len(token_sequence)
vocab_size = len(vocab)
# One row per token position, one column per vocabulary word.
onehot_vectors = np.zeros((num_tokens, vocab_size), int)
for i, word in enumerate(token_sequence):
    onehot_vectors[i, vocab.index(word)] = 1
print(' '.join(vocab), '\n')
print(onehot_vectors, '\n')

# Fix: the original constructed this DataFrame twice and discarded the first copy.
df = pd.DataFrame(onehot_vectors, columns=vocab)
df[df == 0] = ''  # blank out zeros so the one-hot pattern is easier to read
# Back-of-the-envelope sizing: row count for a hypothetical corpus and a
# rough byte footprint for its one-hot table (neither value is used again).
num_rows = 15 * 3500 * 3000
num_bytes = 1000000 * num_rows
# Binary bag-of-words: every token in the sentence maps to 1.
sentence_bow = {token: 1 for token in sentence.split()}
sorted(sentence_bow.items())  # value discarded, as in the original REPL-style code
# Same bag as a one-row DataFrame: tokens become columns after the transpose.
bow_series = pd.Series({token: 1 for token in sentence.split()})
df = pd.DataFrame(bow_series, columns=['sent']).T
# print(df)
# Four-sentence corpus, one sentence per line.
# Fix: the original embedded stray "\ " sequences inside the string literals
# (leftover line-continuation backslashes pasted into the strings), which
# corrupted tokens into "the\", "and\" and "masterpiece\"; removed here.
sentences = """Thomas Jefferson began building Monticello at the age of 26.\n"""
sentences += """Construction was done mostly by local masons and carpenters.\n"""
sentences += "He moved into the South Pavilion in 1770.\n"
sentences += """Turning Monticello into a neoclassical masterpiece was Jefferson's obsession."""
# Binary bag-of-words per sentence: corpus['sentN'][token] == 1.
corpus = {}
for i, sent in enumerate(sentences.split('\n')):
    corpus['sent{}'.format(i)] = dict((tok, 1) for tok in sent.split())
df = pd.DataFrame.from_records(corpus).fillna(0).astype(int).T
print(df[df.columns[:10]], '\n')


# Transpose so each sentence becomes a column of word indicators.
df = df.T
print("\n#度量词袋之间的重合度#")
# Dot products count words shared between sentence bags; the scalar results
# are computed and discarded, exactly as in the original REPL-style code.
df.sent0 @ df.sent1
df.sent0 @ df.sent2
df.sent0 @ df.sent3
# Bitwise AND of the 0/1 indicators picks out tokens common to both sentences.
shared = df.sent0 & df.sent3
print([(k, v) for (k, v) in shared.items() if v])
print("\n#标点符号的处理#")
import re

# Split on runs of whitespace and common punctuation (separators discarded).
sentence = """Thomas Jefferson began building Monticello at the age of 26."""
tokens = re.split(r'[-\s.,;!?]+', sentence)
#print(tokens)

# Same split with a precompiled pattern; the capturing group keeps separators.
pattern = re.compile(r"([-\s.,;!?])+")
sentence = """Thomas Jefferson began building Monticello at the age of 26."""
tokens = pattern.split(sentence)
# Drop empty strings and bare separator characters before printing.
print("    re库 \n", list(filter(lambda x: x and x not in '- \t\n.,;!?', tokens)))
from nltk.tokenize import RegexpTokenizer
# Fix: the original pattern used an unescaped '$' (end-of-string anchor), so
# the '$[0-9.]+' alternative could never match a dollar amount; NLTK's own
# documentation example escapes it as '\$'.
tokenizer = RegexpTokenizer(r'\w+|\$[0-9.]+|\S+')
from nltk.tokenize import TreebankWordTokenizer
sentence = """Monticello wasn't designated as UNESCO World Heritage Site until 1987."""
# Treebank rules split contractions ("wasn't" -> "was", "n't").
tokenizer = TreebankWordTokenizer()
print("    nltk库 \n", tokenizer.tokenize(sentence))
print("\n#缩略语#")
from nltk.tokenize.casual import casual_tokenize

# Tweet-aware tokenizer: reduce_len squashes repeated letters
# ("everrrrrrr" -> "everrr"); strip_handles drops "@TJMonticello".
message = """RT @TJMonticello Best day everrrrrrr at Monticello. Awesommmmmmeeeeeeee day :*)"""
casual_tokens = casual_tokenize(message, reduce_len=True, strip_handles=True)
print(casual_tokens)


print("\n#ngram词词汇表#")
import nltk

# NLTK's English stop-word list (download is a no-op if already cached).
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
# Show the list size and a small sample on separate lines.
print(len(stop_words), stop_words[:7], sep='\n')
print("\n#词汇表规范化#")
# Case normalization: fold every token to lowercase.
tokens = ['House', 'Visitor', 'Center']
normalized_tokens = list(map(str.lower, tokens))
print(normalized_tokens)
def stem(phrase):
    """Crude regex stemmer: lowercase the phrase, strip one trailing 's'
    from each word (keeping words that end in 'ss'), and drop apostrophes."""
    stemmed = []
    for word in phrase.lower().split():
        # Group 1 captures the root; an optional final 's' lands in group 2.
        root = re.findall('^(.*ss|.*?)(s)?$', word)[0][0]
        stemmed.append(root.strip("'"))
    return ' '.join(stemmed)

print(stem("Doctor House's calls"))
from nltk.stem.porter import PorterStemmer

# Porter-stem each token, then strip any leftover apostrophes.
stemmer = PorterStemmer()
stems = [stemmer.stem(w).strip("'") for w in "dish washer's washed dishes".split()]
print(' '.join(stems))
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer

# Lemmatization maps inflected forms to dictionary headwords; the output
# depends on the POS tag ('a' = adjective, 'n' = noun; the default is noun).
lemmatizer = WordNetLemmatizer()
cases = [
    ("better", None),
    ("better", "a"),
    ("good", "a"),
    ("goods", "a"),
    ("goods", "n"),
    ("goodness", "n"),
    ("best", "a"),
]
lemmas = [lemmatizer.lemmatize(w) if pos is None else lemmatizer.lemmatize(w, pos=pos)
          for w, pos in cases]
print(*lemmas)





print("\n\n#文本情感分析#")
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

# VADER: rule-based sentiment scorer tuned for short social-media text.
analyzer = SentimentIntensityAnalyzer()
print(analyzer.polarity_scores(text="Python is very readable and it's great for NLP."))
print(analyzer.polarity_scores(text="Python is not a bad choice for most applications."))
corpus = ["Absolutely perfect! Love it! :-) :-) :-)",
          "Horrible! Completely useless. :(",
          "It was OK. Some good and some bad things."]
# Print each document's compound score (signed) next to its text.
for doc in corpus:
    compound = analyzer.polarity_scores(doc)['compound']
    print('{:+}: {}'.format(compound, doc))
from nlpia.data.loaders import get_data

# Hutto's pre-labeled movie-review sentiment dataset.
movies = get_data('hutto_movies')
movies.head().round(2)  # values discarded, as in the original REPL-style code
movies.describe().round(2)

import pandas as pd
pd.set_option('display.width', 75)
from nltk.tokenize import casual_tokenize
from collections import Counter

# One Counter (bag of words) per review text.
bags_of_words = [Counter(casual_tokenize(text)) for text in movies.text]
# Absent words become NaN on alignment; treat them as zero counts.
df_bows = pd.DataFrame.from_records(bags_of_words).fillna(0).astype(int)
#df_bows.shape
#df_bows.head()
df_bows.head()[list(bags_of_words[0].keys())]

from sklearn.naive_bayes import MultinomialNB

# Train Naive Bayes to predict positive sentiment from bag-of-words counts.
nb = MultinomialNB()
nb = nb.fit(df_bows, movies.sentiment > 0)
movies['predicted_sentiment'] = 1
movies['sentiment_ispositive'] = 1
# Fix: predict_proba returns an (n, 2) array; the original assigned the whole
# array to a single column, which raises in pandas. Take the probability of
# the positive class (column 1, per sorted classes_ = [False, True]) and
# rescale it from [0, 1] onto the dataset's [-4, 4] sentiment scale.
movies['predicted_sentiment'] = nb.predict_proba(df_bows)[:, 1] * 8 - 4
movies['error'] = (movies.predicted_sentiment - movies.sentiment).abs()
round(movies['error'].mean(), 1)  # mean absolute error, discarded as in the original
movies['sentiment_ispositive'] = (movies.sentiment > 0).astype(int)
movies['predicted_ispositive'] = (movies.predicted_sentiment > 0).astype(int)
movies['''sentiment predicted_sentiment sentiment_ispositive predicted_ispositive'''.split()].head(8)
# Evaluate the movie-trained model on product reviews.
products = get_data('hutto_products')
bags_of_words = [Counter(casual_tokenize(text)) for text in products.text]
df_product_bows = pd.DataFrame.from_records(bags_of_words).fillna(0).astype(int)
# Fix: DataFrame.append was removed in pandas 2.0; pd.concat performs the same
# row-wise union that aligns the product bags with the movie vocabulary.
df_all_bows = pd.concat([df_bows, df_product_bows])
df_all_bows.columns
# Keep only the product rows, restricted to the vocabulary seen in training.
df_product_bows = df_all_bows.iloc[len(movies):][df_bows.columns]
products['ispos'] = (products.sentiment > 0).astype(int)
products['predicted_ispositive'] = 1
# Fix: a word absent from a review has count 0, not 1 — the original
# fillna(1) injected a phantom occurrence of every unseen movie-vocabulary
# word into every product review before prediction.
df_product_bows = df_product_bows.fillna(0)
products['predicted_ispositive'] = nb.predict(df_product_bows.values).astype(int)

products.head()
print((products.predicted_ispositive == products.ispos).sum() / len(products))




