import urllib.request
import re
import base64
import time


# Fetch a web page and return its source as text.
def get_html(url):
    """Download *url* with a browser-like User-Agent and return the
    response body decoded as UTF-8.

    Raises urllib.error.URLError (or a subclass) on network failure,
    and UnicodeDecodeError if the body is not valid UTF-8.
    """
    request = urllib.request.Request(url)
    request.add_header('user-agent', 'Mozilla/5.0')
    # Context manager guarantees the response is closed even if
    # read()/decode() raises (the original leaked it in that case).
    with urllib.request.urlopen(request) as response:
        return response.read().decode("utf8")


# Extract SSR accounts from the page.
def get_ssr():
    """Scrape the share page and return a list of ``ssr://`` links.

    NOTE(review): the pattern requires an uppercase letter right after
    the scheme, so it only matches the base64-ish payloads this page
    uses — confirm if the source page format changes.
    """
    # Do not shadow the builtin name `str` (original did).
    html = get_html(r'https://doub.bid/sszhfx/')
    return re.findall(r"(ssr://[A-Z][\w]*)", html)


# Fetch a third-party subscription server's base64-encoded SSR list,
# decode it, and return the entries as a list.
def base64_decode():
    """Download the base64-encoded subscription payload, decode it and
    split it into one SSR entry per line.

    Returns the list of lines; may include a trailing empty string if
    the payload ends with a newline (callers rely on raw split output,
    so that is preserved).
    """
    encoded = get_html(r'https://sharefanqiang.herokuapp.com/subscribe')
    decoded = base64.b64decode(encoded).decode()
    return decoded.split("\n")
   

# Generate ssr.txt with one SSR node per line, removing duplicates.
def build_file():
    """Collect SSR nodes from both sources, drop duplicates while
    preserving first-seen order, and write them to ``ssr.txt``
    (one node per line, CRLF line endings as before).
    """
    nodes = get_ssr() + base64_decode()
    # dict.fromkeys gives O(n) order-preserving dedup; the original
    # `if i not in list` loop was O(n^2).
    unique_nodes = list(dict.fromkeys(nodes))
    # `with` ensures the file is closed even on a write error.
    with open('ssr.txt', 'w') as f:
        for node in unique_nodes:
            f.write(node + "\r\n")


# Base64-encode ssr.txt, producing ssr_base64.txt.
def base64_encode():
    """Encode ``ssr.txt`` to base64 (MIME style, newline-terminated
    76-char lines as produced by :func:`base64.encode`) and write the
    result to ``ssr_base64.txt``.
    """
    # `with` closes both files even if encoding fails midway
    # (original leaked both handles on error).
    with open('ssr.txt', 'rb') as fr, open('ssr_base64.txt', 'wb') as fw:
        base64.encode(fr, fw)


# Fetch SSR nodes from the web, generating ssr.txt and the
# subscription-server file ssr_base64.txt.
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for measuring elapsed wall time.
    start = time.perf_counter()
    print("running...")
    build_file()
    base64_encode()
    end = time.perf_counter()
    run_time = end - start
    print("run time: %f s" % run_time)
    print("ok")
