# # from concurrent.futures import ThreadPoolExecutor
# # import  queue
# # class BoundedThreadPoolExecutor(ThreadPoolExecutor):
# #     def __init__(self, max_workers=None, thread_name_prefix=''):
# #         super().__init__(max_workers,thread_name_prefix)
# #         self._work_queue = queue.Queue(max_workers * 2)
# # import  time
# # def fun(i__):
# #     time.sleep(2)
# #     print(str(i__) + 'hi')
# # def mk_auth(filename_auth):
# #     pass
# # def mk_ips(ips,ports):
# #     pass
# # ips = {'ip':'','port':'','auth':''}
# #
# # def _tomcat(_ips):
# #     pass
# # def output(result):
# #     pass
# #
# # pool = BoundedThreadPoolExecutor(500)
# # # pool = ThreadPoolExecutor(5)
# # print(time.strftime(''))
# # for i in range(5*10000):
# #     # print(i)
# #     pool.submit(fun,i)
# # print(time.time())
# from concurrent.futures import ThreadPoolExecutor
#
# pool = ThreadPoolExecutor(50,thread_name_prefix='s')
# pool.submit()
# import  time
# def mk_url():
#     ips = range(10000)
#     ports = ['80','8080','8081']
#     auths = ['admin:admin','admin:root','admin:tomcat']
#     for port in ports:
#         for auth in auths:
#             for ip in ips:
#                 yield {'ip':ip,'port':port,'auth':auth}
# def write_ips():
#     ips = range(10000)
#     ports = ['80', '8080', '8081']
#     auths = ['admin:admin', 'admin:root', 'admin:tomcat']
#     with open('formated_ip.txt','w') as f:
#         for port in ports:
#             for auth in auths:
#                 for ip in ips:
#                     f.write('%s:%d:%s' % (port,ip,auth)+'\n')
#
# startime = time.time()
# # for x in mk_url():
# #     pass
# write_ips()
# print('三层for循环所花的时间% f' % (time.time()-startime))

# import concurrent.futures
# import urllib.request
# from concurrent.futures import as_completed
# URLS = ['http://www.foxnews.com/',
#         'http://www.cnn.com/',
#         'http://europe.wsj.com/',
#         'http://www.bbc.co.uk/',
#         'http://some-made-up-domain.com/']
#
# # Retrieve a single page and report the URL and contents
# def load_url(url, timeout):
#     with urllib.request.urlopen(url, timeout=timeout) as conn:
#         return conn.read()
#
# # We can use a with statement to ensure threads are cleaned up promptly
# with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
#     # Start the load operations and mark each future with its URL
#     future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
#     for future in concurrent.futures.as_completed(future_to_url):
#         url = future_to_url[future]
#         try:
#             data = future.result()
#         except Exception as exc:
#             print('%r generated an exception: %s' % (url, exc))
#         else:
#             print('%r page is %d bytes' % (url, len(data)))

from base64 import b64decode, b64encode

# Round-trip a Basic-auth credential string through Base64 to verify
# encode/decode symmetry (note: Base64 is encoding, not encryption).
a = 'Basic admin:admin'

# b64encode requires bytes, so encode the str first; it returns bytes.
en_a = b64encode(a.encode('utf-8'))

# b64decode accepts bytes (or ASCII str) directly — the original
# .decode('utf-8') round trip before decoding was redundant.
de_a = b64decode(en_a)

# Decode the recovered bytes back to str for display.
print(de_a.decode('utf-8'))
