import requests, os, re, io, sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030') # rewrap stdout with gb18030 so Chinese text prints without UnicodeEncodeError on a non-UTF-8 console
sss = []  # module-level accumulator of discovered ss:// account URIs (shared by findSS/main)

def getUrls():
    """Prompt for a path to a URL-list file and return its lines as a list.

    The user is asked for a path on stdin; an empty answer falls back to
    "urls.txt". Each line of the file becomes one entry (trailing newline
    stripped).

    Returns:
        list[str]: the URLs read from the file, or [] when the file does
        not exist (a warning is printed in that case).
    """
    path = "urls.txt"
    print("enter the path of file which has the urls:(default is urls.txt)")
    inPath = input()
    if inPath != "":
        path = inPath
    urls = []
    if os.path.exists(path):
        with open(path, "rt") as ff:
            # Iterate the file object directly instead of the old manual
            # chunked-readlines(100) loop — simpler and equivalent.
            for line in ff:
                urls.append(line.strip('\n'))
    else:
        # Fixed grammar of the warning ("don't exists" -> "does not exist").
        print("The file %s does not exist." % path)
    return urls

def getHtml(url, timeout=30):
    """Fetch *url* over HTTP(S) and return the response body as text.

    Args:
        url: the page to download.
        timeout: seconds before the request is aborted. The original call
            had no timeout, so a stalled server hung the whole script;
            the default keeps the call backward-compatible.

    Returns:
        str: the decoded response body (requests' guessed encoding).
    """
    response = requests.get(url, timeout=timeout)
    return response.text

def getSubUrls(html):
    """Return the href attribute values found in *html*.

    Bug fix: the original compiled r"href" (which only finds the literal
    word "href") and then returned None because the result was never
    returned. Now the quoted href value is captured and the list of
    values is returned. No caller in this file depended on the old
    None result.

    Args:
        html: raw HTML text to scan.

    Returns:
        list[str]: every single- or double-quoted href value, in order.
    """
    pattern = re.compile(r'''href=["']([^"']+)["']''')
    return pattern.findall(html)

def findSS(html):
    """Extract ss:// account URIs from *html* into the global `sss` list.

    Bug fix: the original pattern r'ss:\/\/\w*' only matched word
    characters [A-Za-z0-9_], which truncates the base64 payload of an
    ss:// URI at the first '+', '/', '=' or '-'. The charset is widened
    to cover standard and URL-safe base64. (Tag/host suffixes after '#'
    or '@' are deliberately not matched — presumably out of scope here,
    matching the original's intent.)

    Args:
        html: page text to scan.

    Side effects:
        Appends each URI not already present to the module-level `sss`.
    """
    pattern = re.compile(r'ss://[A-Za-z0-9+/=_-]*')
    for uri in pattern.findall(html):
        if uri not in sss:
            sss.append(uri)

def main():
    """Collect ss:// account URIs from a list of URLs and persist them.

    Previously saved accounts are loaded from ssAccouts.txt so the file
    stays deduplicated across runs; then every URL returned by getUrls()
    is fetched, scanned by findSS(), and the combined set is rewritten
    to ssAccouts.txt, one URI per line.

    Bug fixes vs. the original:
      * `lines = of.readlines(100)` sat INSIDE the inner for-loop, so a
        fresh chunk was read for every line appended and whole chunks of
        previously saved accounts were silently dropped.
      * Lines were appended with their trailing '\n', so the dedup test
        in findSS() (which sees URIs without '\n') never matched, and the
        output file accumulated duplicates and blank lines.
    """
    oPath = "ssAccouts.txt"  # note: original (misspelled) filename kept for compatibility
    if os.path.exists(oPath):
        with open(oPath, "rt") as of:
            # Iterate the file directly and strip the newline so entries
            # compare equal to the URIs findSS() extracts.
            for line in of:
                account = line.strip('\n')
                if account and account not in sss:
                    sss.append(account)

    urls = getUrls()
    for url in urls:
        findSS(getHtml(url))
    with open(oPath, "w+") as of:
        for ss in sss:
            of.write(ss + '\n')

# Script entry point; removed the stray C-style trailing semicolon.
if __name__ == "__main__":
    main()
