# -*- coding:utf-8 -*-

import re

# Sample conference/venue title strings used by the demo code at the bottom
# of this file to exercise the cleaning helpers below.
txt_list = [
'CSCWD (Selected papers)',

'FSKD (2)',

'Web Intelligence/IAT Workshops',

'International Conference on Computational Science (2)',

'Proceedings of the 2009 WRI International Conference on Communications and Mobile Computing - Volume 01',

'Proceedings of the 3rd international conference on Technologies for E-Learning and Digital Entertainment',


'Computer Science R D',
]

def get_tokens(sentence):
    """Return the lower-cased word tokens (\\w+ runs) of *sentence*."""
    matches = re.finditer(r"\w+", sentence)
    return [match.group(0).lower() for match in matches]


def drop_symbol(sentence):
    """Remove isolated single-letter tokens from *sentence* and strip it.

    Example: 'Computer Science R D' -> 'Computer Science'.

    The previous pattern, ' ([a-z]|[A-Z])( |$)', consumed the trailing
    space of each match, so a run of consecutive single letters ('R D')
    needed two passes to disappear (which is why clean_text called this
    function twice).  Using a lookahead for the trailing boundary keeps
    that space available to anchor the next match, so one pass suffices.
    """
    # A space, one ASCII letter, then a space or end-of-string; the trailing
    # boundary is a lookahead so it is not consumed by the substitution.
    single_letter = re.compile(r' [A-Za-z](?= |$)')
    return single_letter.sub('', sentence).strip()

# Clean a piece of text.
def clean_text(sentence):
    """Strip isolated single-letter tokens and surrounding whitespace.

    drop_symbol is applied twice on purpose: its substitution can leave
    behind a single letter that only becomes removable on a second pass
    (e.g. the 'D' in '... R D').
    """
    cleaned = drop_symbol(drop_symbol(sentence))
    # strip() removes leading/trailing whitespace (\n, \r, \t, spaces).
    return cleaned.strip()


# Ad-hoc demo of the cleaning helpers (Python 2 print statements).
# for txt in txt_list:
#     print clean_text(txt)

# txt = 'Computing - Volume'
# print clean_text(txt)

print clean_text( txt_list[5] )

# txt_list[6] is 'Computer Science R D' — a single drop_symbol pass leaves
# a trailing letter behind, which is why the double application is shown.
print drop_symbol(txt_list[6])
print drop_symbol(txt_list[6].strip())

print drop_symbol( drop_symbol(txt_list[6]) )
# print clean_text( txt_list[6] )
