#! -*- coding:utf-8 -*-

'''
from sklearn import svm
X = [[0, 0],[0.1 , 0.1], [1, 1],[1.1,1.1]]
y = [0, 0,1,1]
clf = svm.SVC()
clf.fit(X, y)

print clf.predict([[2., 2.],[1.8,1.9]])

# get support vectors
print clf.support_vectors_
# get indices of support vectors
print clf.support_
# get number of support vectors for each class
print clf.n_support_
'''

#examples taken from here: http://stackoverflow.com/a/1750187


# Example: constructing word-presence (bag-of-words) vectors


mydoclist = ['Julie loves me more than Linda loves me',
             'Jane likes me more than Julie loves me',
             'He likes basketball more than baseball']

from collections import Counter

for doc in mydoclist:
  # Binary term "frequency": each word seen is marked present (1), not counted.
  # (Use tf.update(doc.split()) instead if real counts are ever wanted.)
  tf = Counter()
  for word in doc.split():
    tf[word] = 1
  # print() call form works on Python 3 (the original `print x` statement does not).
  print(list(tf.items()))


import string  # NOTE(review): unused -- format() is a builtin, not part of the string module


def build_lexicon(corpus):
  """Return the set of unique whitespace-delimited tokens across all docs in corpus.

  corpus: iterable of strings (documents). Returns an unordered set of words.
  """
  lexicon = set()
  for doc in corpus:
    # set.update() takes any iterable directly; the original wrapped
    # doc.split() in a redundant list comprehension.
    lexicon.update(doc.split())
  return lexicon


def freq(term, document):
  """Binary term frequency: 1 if term appears in document, else 0.

  document is tokenized by whitespace. Deliberately presence-based rather
  than a raw count; for raw term frequency use document.split().count(term).
  """
  # Membership test is clearer (and stops at the first hit) versus
  # counting every occurrence just to compare against zero.
  return 1 if term in document.split() else 0

vocabulary = build_lexicon(mydoclist)
# Sort once so the vector layout (and the printed output) is deterministic;
# iterating the raw set would give an arbitrary, run-to-run order.
ordered_vocab = sorted(vocabulary)

doc_term_matrix = []
print('Our vocabulary vector is [' + ' '.join(ordered_vocab) + ']')
# enumerate() avoids the O(n) mydoclist.index(doc) lookup per iteration
# (which would also misnumber duplicate documents).
for doc_number, doc in enumerate(mydoclist, start=1):
  print('The doc is "' + doc + '"')
  tf_vector = [freq(word, doc) for word in ordered_vocab]
  # Loop variable renamed from `freq` -- the original shadowed the freq() function.
  tf_vector_string = ', '.join(format(count, 'd') for count in tf_vector)
  print('The tf vector for Document %d is [%s]' % (doc_number, tf_vector_string))
  doc_term_matrix.append(tf_vector)

print('All combined, here is our master document term matrix: ')
print(doc_term_matrix)


# TODO: build a model from the doc-term matrix using decision trees or other ML algorithms
