import  os
import numpy
import tensorflow as tf
import pandas as pd
from tensorflow.contrib.learn.python.learn.estimators import svm

# Paths to the training/test CSV files (placeholders — fill in before running).
svm_dim_train_file = ''
svm_dim_test_file = ''


def getPandasFrame(path=None):
    """Load a CSV file into a pandas DataFrame.

    Args:
        path: Optional CSV path. Defaults to the module-level
            ``svm_dim_train_file`` for backward compatibility.

    Returns:
        pandas.DataFrame parsed from the CSV file.
    """
    return pd.read_csv(path if path is not None else svm_dim_train_file)


# NOTE(review): ``pf`` is never used later in this file — kept only in case
# another module imports it; confirm before removing.
pf = getPandasFrame()

# CSV header names: 2000 feature columns. String names are required both as
# DataFrame keys (input_fn indexes df with string keys, e.g. '0') and as
# TensorFlow feature-column names.
indextitles = [str(i) for i in range(2000)]

# One sparse feature column per CSV column; each raw value is hashed into at
# most 1000 buckets.
classcols = [
    tf.contrib.layers.sparse_column_with_hash_bucket(
        column_name=title, hash_bucket_size=1000)
    for title in indextitles
]

LABEL_COLUMN = "label"
# All CSV columns: every feature column followed by the label column.
# (The original ``[title for title in indextitles,'label']`` is a Python 3
# syntax error — a leftover Python 2 tuple-iteration form.)
COLUMNS = indextitles + [LABEL_COLUMN]
FEATURE_COLUMNS = indextitles

# Load the train/test sets; ``names=`` supplies the column headers explicitly.
# (The original passed NAMES=/NAME=, which pandas rejects as unknown kwargs.)
df_train = pd.read_csv(svm_dim_train_file, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(svm_dim_test_file, names=COLUMNS, skipinitialspace=True)

# Drop any row containing a missing value.
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)

# Binarize the label: 1 when the raw label text contains '+', else 0.
df_train[LABEL_COLUMN] = (
    df_train[LABEL_COLUMN].apply(lambda x: '+' in x)).astype(int)
df_test[LABEL_COLUMN] = (
    df_test[LABEL_COLUMN].apply(lambda x: '+' in x)).astype(int)

def input_fn(df):
    """Convert a DataFrame into the (features, label) pair the SVM expects.

    Args:
        df: DataFrame whose columns include FEATURE_COLUMNS and LABEL_COLUMN.

    Returns:
        Tuple ``(feature_cols, label)`` where ``feature_cols`` maps each
        feature name to a single-column SparseTensor of shape [num_rows, 1]
        plus the mandatory ``'example_id'`` string tensor, and ``label`` is a
        constant tensor built from the label column.
    """
    num_rows = df[LABEL_COLUMN].size
    # One single-column SparseTensor per feature. Iterate FEATURE_COLUMNS,
    # not COLUMNS: the label column must not be fed back in as a feature.
    feature_cols = {
        k: tf.SparseTensor(
            indices=[[i, 0] for i in range(num_rows)],
            values=df[k].values,
            dense_shape=[num_rows, 1])
        for k in FEATURE_COLUMNS
    }
    # tf.contrib.learn's SVM requires a unique string id per example.
    # (The original passed a generator to tf.constant and called the
    # ``Series.size`` int property as a function — both runtime errors.)
    feature_cols['example_id'] = tf.constant(
        [str(i + 1) for i in range(num_rows)])
    label = tf.constant(df[LABEL_COLUMN].values)
    return feature_cols, label
def train_input_fn():
    """Input function for training: feeds the training DataFrame.

    (The original called the ``input`` builtin instead of ``input_fn``,
    which would block on stdin and return a string.)
    """
    return input_fn(df_train)
def eval_input_fn():
    """Input function for evaluation: feeds the held-out test DataFrame."""
    features_and_label = input_fn(df_test)
    return features_and_label

# Checkpoint directory; an empty string lets the estimator choose a temp dir.
model_dir = ''

# The SVM estimator expects FeatureColumn objects, not raw column-name
# strings — pass ``classcols`` (the sparse hashed columns), not the
# FEATURE_COLUMNS name list.
model = svm.SVM(example_id_column='example_id',
                feature_columns=classcols,
                model_dir=model_dir)

model.fit(input_fn=train_input_fn, steps=100)
results = model.evaluate(input_fn=eval_input_fn, steps=1)
for key in sorted(results):
    print("%s: %s" % (key, results[key]))


