from urllib.parse import urlparse
from socket import AF_INET, SOCK_DGRAM, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR, socket
from requests import get, head
from json import load, dumps
from time import strftime, gmtime
from threading import Thread
from os import stat, mkdir
from os.path import exists
from typing import Tuple
import sys
# import getopt
# import argparse
import traceback
import re
# Packages used: urllib, socket, requests, json, time, threading, os, getopt, argparse, traceback, re
def get_lan_ip():
	"""Return this machine's LAN IP address as a string.

	Opens a throwaway UDP socket and connect()s it to an arbitrary
	address; no packet is sent — the connect merely makes the OS pick
	the outgoing interface, whose address we then read back.
	Falls back to the loopback address on any failure.
	"""
	probe = socket(AF_INET, SOCK_DGRAM)
	try:
		# The destination does not have to be reachable.
		probe.connect(('10.255.255.255', 1))
		return probe.getsockname()[0]
	except Exception:
		return '127.0.0.1'
	finally:
		probe.close()
def status_html(status_message: str) -> str:
	"""Build a minimal HTML error page using *status_message* as both title and heading."""
	template = '<html><head><title>{0}</title></head><body><h1>{0}</h1></body></html>'
	return template.format(status_message)
def validate_ip_and_port(ip_port: str) -> bool:
	"""Return True if *ip_port* looks like 'A.B.C.D:port'.

	Only the digit pattern is checked; octet ranges (0-255) and port
	ranges are NOT validated, matching the original behavior.
	"""
	# bool(...) collapses the match-object-or-None into True/False directly.
	return bool(re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+$', ip_port))
def validate_ip(ip: str) -> bool:
	"""Return True if *ip* looks like a dotted-quad IPv4 address.

	Only the digit pattern is checked; octet ranges (0-255) are NOT
	validated, matching the original behavior.
	"""
	# bool(...) collapses the match-object-or-None into True/False directly.
	return bool(re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip))
def validate_port(port: str) -> bool:
	"""Return True if *port* is a non-empty string of digits.

	The numeric range (0-65535) is NOT validated, matching the
	original behavior.
	"""
	# bool(...) collapses the match-object-or-None into True/False directly.
	return bool(re.match(r'^\d+$', port))
class ProxyServer(object):
	"""A caching, filtering HTTP proxy server.

	HTTPS (CONNECT) traffic is not handled. Responses are cached on
	disk and revalidated with If-Modified-Since before reuse.
	"""

	def __init__(self, port, filter_json, page404, page403):
		self.sever_port = port  # listening port of the proxy
		# Main socket that accepts incoming client connections.
		self.server_main_socket = socket(
			AF_INET, SOCK_STREAM)
		# Allow the port to be rebound immediately after a restart.
		self.server_main_socket.setsockopt(
			SOL_SOCKET, SO_REUSEADDR, 1)
		self.MAX_LISTEN = 10  # accept() backlog
		self.server_main_socket.bind(('', self.sever_port))
		self.server_main_socket.listen(self.MAX_LISTEN)
		self.HTTP_BUFFER_SIZE = 2048  # per-recv buffer size in bytes
		self.__cache_dir = './cache/'  # on-disk response cache directory
		self.__make_cache()  # ensure the cache directory exists
		self.host_denied = filter_json['host']  # blocked hostnames -> 404 page
		self.ip_denied = filter_json['ip']  # blocked client IPs -> 403 page
		self.fishing = filter_json['fishing']  # hostnames redirected to fishing_to
		self.fishing_to = filter_json['fishing_to']  # redirect target URL
		self.page404 = page404  # canned 404 response bytes
		self.page403 = page403  # canned 403 response bytes

	def __make_cache(self):
		"""Create the cache directory if it does not exist yet."""
		if not exists(self.__cache_dir):
			mkdir(self.__cache_dir)

	def filter_web(self, url):
		"""Return True if *url* (a hostname) is on the host deny list."""
		return url in self.host_denied

	def filter_ip(self, ip):
		"""Return True if the client *ip* is on the IP deny list."""
		if not self.ip_denied:
			return False
		return ip in self.ip_denied

	def filter_fishing(self, url):
		"""Return True if *url* (a hostname) should be redirected to the phishing target."""
		if not self.fishing:
			return False
		return url in self.fishing

	def proxy_connect(self, client_socket, address):
		"""Serve one client request: filter, answer from cache, or forward upstream.

		*client_socket* is the accepted connection; *address* is the
		(ip, port) pair associated with the client. Only plain HTTP is
		supported. Any failure is logged and the client socket closed.
		"""
		try:
			raw_request = client_socket.recv(self.HTTP_BUFFER_SIZE)
			# Decode with errors ignored so binary payloads do not raise
			# UnicodeDecodeError (at the cost of mangling binary bodies).
			message: str = raw_request.decode('utf-8', 'ignore')
			headers = message.split('\r\n')  # split the request into header lines
			request_line = headers[0].strip().split()  # [method, URL, version]
			# BUGFIX: was `< 1`, but request_line[1] is read below, so a
			# one-token request line raised IndexError instead of this branch.
			if len(request_line) < 2:
				print("Request Line not contains url!")
				print("Full Request Message:", message)
				client_socket.close()
				return
			middle_request_line = request_line[1]
			# Strip a trailing '/' before parsing so cache keys stay stable.
			url = urlparse(middle_request_line[:-1] if middle_request_line[-1] == '/' else middle_request_line)
			if url.scheme != 'http':
				print("Not HTTP Protocol!")
				client_socket.close()
				return
			hostname = url.hostname
			path = url.path if url.path else '/'
			port = url.port if url.port else 80  # default HTTP port
			geturl = url.geturl()
			if not hostname or not path or not port or not geturl:
				print("URL is not valid!")
				client_socket.close()
				# BUGFIX: previously fell through and kept using the bad URL.
				return
			if self.filter_web(hostname):  # blocked site -> canned 404 page
				client_socket.sendall(self.page404)
				client_socket.close()
				return
			if self.filter_ip(address[0]):  # blocked client -> canned 403 page
				client_socket.sendall(self.page403)
				client_socket.close()
				return
			if self.filter_fishing(hostname):  # redirect to the phishing target
				client_socket.sendall(get(self.fishing_to).content)
				client_socket.close()
				return
			# Flatten host+path into a single cache file name.
			cache_path = self.__cache_dir + (hostname + path).replace('/', '_')
			flag_modified = False  # does the origin hold a newer copy?
			flag_exists = exists(cache_path)
			server_socket = socket(AF_INET, SOCK_STREAM)
			if flag_exists:
				cache_time = stat(cache_path).st_mtime  # cache file mtime
				# Ask the origin whether the resource changed since we cached it.
				cond_headers = {'If-Modified-Since': strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(cache_time))}
				response = head(geturl, headers=cond_headers)
				if response.status_code == 304:  # not modified: serve the cache
					print("Read From Cache " + cache_path)
					with open(cache_path, "rb") as f:
						client_socket.sendall(f.read())
				else:
					flag_modified = True  # the cached copy is stale
					print("Cache is modified")
			if not flag_exists or flag_modified:
				print("Attempt to connect", geturl)
				server_socket.connect((hostname, port))
				server_socket.sendall(raw_request)
				# Stream the origin's response to the client while mirroring
				# it into the cache file.
				with open(cache_path, 'wb') as f:
					while True:
						temp_buff = server_socket.recv(self.HTTP_BUFFER_SIZE)
						if not temp_buff:  # origin closed the connection
							break
						client_socket.sendall(temp_buff)
						f.write(temp_buff)
						f.flush()
			server_socket.close()
			client_socket.close()
		except Exception:
			print(f'Error: {address[0]}:{address[1]}')
			traceback.print_exc()
			client_socket.close()
			return
def _load_or_create_filter(path):
	"""Return the filter rules dict from *path*, writing defaults there when the file is missing."""
	if exists(path):
		with open(path) as f:
			return load(f)
	# Default filter rules: nothing blocked, phishing redirect preset.
	rules = {"host": [], "ip": [], "fishing": [], "fishing_to": "http://blog.nng-hrb.buzz/test"}
	with open(path, "w") as f:
		f.write(dumps(rules, ensure_ascii=False))
	return rules


def _load_or_create_page(path, status_message):
	"""Return the page bytes at *path*, creating it from the status template when missing."""
	if exists(path):
		with open(path, "rb") as f:
			return f.read()
	html = status_html(status_message)
	with open(path, "w") as f:
		f.write(html)
	return html.encode()


def main():
	"""Load config and error pages, parse the optional ip[:port] argv, and serve forever.

	Accepts one optional command-line argument: 'ip:port', 'ip', or
	'port'. Defaults to the LAN IP and port 1392. Spawns one thread
	per accepted client connection.
	"""
	filter_json = _load_or_create_filter('./filter.json')
	page404 = _load_or_create_page('./404.html', '404 Not Found')
	page403 = _load_or_create_page('./403.html', '403 Forbidden')
	client_ip = get_lan_ip()
	port = 1392  # default listening port
	argv = sys.argv
	if len(argv) > 1:
		arg = argv[1]
		if validate_ip_and_port(arg):
			client_ip, port = arg.split(':')
			port = int(port)
		elif validate_ip(arg):
			client_ip = arg
		elif validate_port(arg):
			port = int(arg)
		else:
			print('Invalid argument!')
			return
	proxy = ProxyServer(port, filter_json, page404, page403)
	print(f'proxy server is running in {client_ip}:{port}')
	while True:
		# sock is used to talk back to the originating client.
		sock, address = proxy.server_main_socket.accept()
		# NOTE(review): the accepted peer's IP is replaced with this server's
		# own LAN IP, so filter_ip never sees the real client address —
		# confirm this is intended.
		address = (client_ip, address[1])
		Thread(target=proxy.proxy_connect, args=(sock, address)).start()
# Start the proxy server only when run as a script, not when imported.
if __name__ == '__main__':
	main()
