# user/views.py
import os
import re
import subprocess
import sys
from datetime import datetime

from django.conf import settings
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import make_password
from django.db.models import Q,Count,Sum,F,Max,Min,Avg

from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from django_filters.rest_framework import DjangoFilterBackend

from .models import Job
from .filter import CrawlerByKeyboardFilter
from .serializer import CrawlerProfileSerializer
from utils.pagination import CustomPageNumberPagination
from utils.generics import ListAPIView, CreateAPIView, UpdateAPIView
# 通用序列化
class CrawlerResponseSerializer(serializers.Serializer):
	"""Generic status/message response body, used in swagger `responses` docs."""
	status = serializers.CharField()
	msg = serializers.CharField()

# 通用序列化
class StoreDataSerializer(serializers.Serializer):
	"""Generic status/results response body for the analysis endpoints below."""
	status = serializers.CharField()
	results = serializers.ListField()

# 城市分析序列化
class CityAnalyseSerializer(serializers.Serializer):
	"""Response body for the city-analysis endpoint (status + per-city rows)."""
	status = serializers.CharField()
	results = serializers.ListField()

# 静态爬取数据接口
class CrawlerDataView(APIView):
	"""Start the crawler script for a given keyword and page count."""

	# Swagger request-body schema. The `required` list previously named
	# unrelated password fields ('username','pwd1','pwd2') — a copy/paste
	# from another endpoint; this endpoint actually requires keyboard+pages.
	params = openapi.Schema(type=openapi.TYPE_OBJECT, required=['keyboard', 'pages'], properties={
		'keyboard': openapi.Schema(type=openapi.TYPE_STRING, description='关键字', examples='电脑'),
		'pages': openapi.Schema(type=openapi.TYPE_STRING, description='页数', examples='10'),
		})

	@swagger_auto_schema(
		title='crawler_data',
		operation_description='根据关键字进行爬取数据',
		responses={200: CrawlerResponseSerializer},
		request_body=params)
	def post(self, request):
		"""Validate keyword/pages, then run the crawler script.

		Returns {'status': 'error', ...} when either field is missing;
		otherwise runs the crawler and reports success.
		"""
		data = request.data

		keyboard = data.get('keyboard', '')
		pages = data.get('pages', '')
		if not keyboard or not pages:
			return Response({'status':'error', 'msg': '信息不全！'})
		# Build the script path from settings.BASE_DIR instead of the
		# hard-coded absolute-path placeholder that was here before.
		# NOTE(review): assumes BASE_DIR is the project root that contains
		# apps/utils/crawler.py — confirm against the project layout.
		script = os.path.join(settings.BASE_DIR, 'apps', 'utils', 'crawler.py')
		# SECURITY FIX: the original concatenated user input into an
		# os.system() shell command, allowing shell injection. Passing the
		# arguments as a list with shell=False makes them plain argv entries.
		# subprocess.run() keeps os.system()'s blocking behaviour.
		subprocess.run([sys.executable, script, str(keyboard), str(pages)], shell=False)
		return Response({'status':'ok', 'msg': '爬虫启动成功'})

# 静态爬取-城市分析
class CityAnalyseView(APIView):
	"""Static crawl - per-city salary statistics."""

	# Swagger request-body schema. The `required` list previously named
	# unrelated password fields; this endpoint only takes 'keyboard'.
	params = openapi.Schema(type=openapi.TYPE_OBJECT, required=['keyboard'], properties={
		'keyboard': openapi.Schema(type=openapi.TYPE_STRING, description='关键字', examples='软件开发'),
		})

	@swagger_auto_schema(
		title='city_analyse',
		operation_description='静态爬取-城市分析',
		responses={200: CityAnalyseSerializer},
		request_body=params)
	def post(self, request):
		"""Return min/max/avg of min_wage and max_wage per city.

		Optionally filtered by a job-name keyword. Rows whose min_wage is
		"面议" (negotiable) are excluded since they carry no numeric salary.
		"""
		data = request.data
		keyboard = data.get('keyboard', '')
		keyboard_dict = dict()
		if keyboard:
			keyboard_dict['job_name__contains'] = keyboard
		citys = Job.objects.filter(**keyboard_dict).exclude(min_wage="面议").values('job_city').distinct()
		count = []
		for city in citys:
			search_dict = dict()
			search_dict['job_city'] = city['job_city']
			keyboard_dict['job_city'] = city['job_city']
			# Hoist the shared queryset; the original rebuilt it for each
			# aggregate call. (FIX: the original also copied the
			# 'job_name__contains' filter key into search_dict, leaking it
			# into the response rows — removed.)
			base_qs = Job.objects.filter(**keyboard_dict).exclude(min_wage="面议")
			min_wage = base_qs.aggregate(Min('min_wage'),Max('min_wage'),Avg('min_wage'))
			max_wage = base_qs.aggregate(Min('max_wage'),Max('max_wage'),Avg('max_wage'))
			search_dict['min_wage_min'] = min_wage['min_wage__min']
			search_dict['min_wage_max'] = min_wage['min_wage__max']
			# BUG FIX: this previously read 'min_wage__max', so the reported
			# average always equalled the maximum; use the Avg aggregate.
			search_dict['min_wage_avg'] = min_wage['min_wage__avg']
			search_dict['max_wage_min'] = max_wage['max_wage__min']
			search_dict['max_wage_max'] = max_wage['max_wage__max']
			search_dict['max_wage_avg'] = max_wage['max_wage__avg']
			count.append(search_dict)
		return Response({'status':'ok', 'results': count})

# 根据工作名称、工作经验、学历要求进行筛选工作地区职位数量分布
class JobAreaAnalyseView(APIView):
	"""Per-city job counts, filtered by name / experience / education."""

	params = openapi.Schema(type=openapi.TYPE_OBJECT, required=[], properties={
		'job_name': openapi.Schema(type=openapi.TYPE_STRING, description='工作名称', examples='软件开发'),
		'job_experience': openapi.Schema(type=openapi.TYPE_STRING, description='工作经验', examples='1-3年'),
		'job_education': openapi.Schema(type=openapi.TYPE_STRING, description='学历要求', examples='学历不限'),
		})

	@swagger_auto_schema(
		title='job_area_analyse',
		operation_description='根据工作名称、工作经验、学历要求进行筛选工作地区分布',
		responses={200: StoreDataSerializer},
		request_body=params)
	def post(self, request):
		"""Count matching jobs for each distinct city and return the list."""
		payload = request.data
		# Build `__contains` filters only for the fields the caller supplied.
		filters = {
			field + '__contains': value
			for field, value in (
				('job_name', payload.get('job_name', '')),
				('job_experience', payload.get('job_experience', '')),
				('job_education', payload.get('job_education', '')),
			)
			if value
		}
		results = []
		for row in Job.objects.filter(**filters).values('job_city').distinct():
			per_city = dict(filters)
			per_city['job_city'] = row['job_city']
			results.append({
				'job_city': row['job_city'],
				'num': Job.objects.filter(**per_city).count(),
			})
		return Response({'status':'ok', 'results': results})

# 根据工作名称、工作经验、学历要求进行筛选工作经验
class JobExperienceAnalyseView(APIView):
	"""Per-experience-level job counts, filtered by name / experience / education."""

	params = openapi.Schema(type=openapi.TYPE_OBJECT, required=[], properties={
		'job_name': openapi.Schema(type=openapi.TYPE_STRING, description='工作名称', examples='软件开发'),
		'job_experience': openapi.Schema(type=openapi.TYPE_STRING, description='工作经验', examples='1-3年'),
		'job_education': openapi.Schema(type=openapi.TYPE_STRING, description='学历要求', examples='学历不限'),
		})

	@swagger_auto_schema(
		title='job_experience_analyse',
		operation_description='根据工作名称、工作经验、学历要求进行筛选工作经验',
		responses={200: StoreDataSerializer},
		request_body=params)
	def post(self, request):
		"""Tally job counts for each distinct experience requirement."""
		body = request.data
		base_filters = dict()
		name = body.get('job_name', '')
		experience = body.get('job_experience', '')
		education = body.get('job_education', '')
		if name:
			base_filters['job_name__contains'] = name
		if experience:
			base_filters['job_experience__contains'] = experience
		if education:
			base_filters['job_education__contains'] = education
		buckets = []
		for entry in Job.objects.filter(**base_filters).values('job_experience').distinct():
			level = entry['job_experience']
			# Narrow to the exact experience value on top of the base filters.
			narrowed = dict(base_filters, job_experience=level)
			buckets.append({
				'job_experience': level,
				'num': Job.objects.filter(**narrowed).count(),
			})
		return Response({'status':'ok', 'results': buckets})

# 根据工作名称、工作经验、学历要求进行筛选学历要求
class JobEducationAnalyseView(APIView):
	"""Per-education-level job counts, filtered by name / experience / education."""

	params = openapi.Schema(type=openapi.TYPE_OBJECT, required=[], properties={
		'job_name': openapi.Schema(type=openapi.TYPE_STRING, description='工作名称', examples='软件开发'),
		'job_experience': openapi.Schema(type=openapi.TYPE_STRING, description='工作经验', examples='1-3年'),
		'job_education': openapi.Schema(type=openapi.TYPE_STRING, description='学历要求', examples='学历不限'),
		})

	@swagger_auto_schema(
		title='job_education_analyse',
		operation_description='根据工作名称、工作经验、学历要求进行筛选学历要求',
		responses={200: StoreDataSerializer},
		request_body=params)
	def post(self, request):
		"""Tally job counts for each distinct education requirement."""
		payload = request.data
		criteria = {}
		# Collect `__contains` filters for whichever fields were supplied.
		for field in ('job_name', 'job_experience', 'job_education'):
			entered = payload.get(field, '')
			if entered:
				criteria[field + '__contains'] = entered
		tally = []
		for entry in Job.objects.filter(**criteria).values('job_education').distinct():
			level = entry['job_education']
			narrowed = dict(criteria)
			narrowed['job_education'] = level
			tally.append({
				'job_education': level,
				'num': Job.objects.filter(**narrowed).count(),
			})
		return Response({'status':'ok', 'results': tally})

# 获取全部的爬虫所得数据
class GetCrawlerProfileListView(ListAPIView, GenericViewSet):

	"""List every crawled Job record."""
	queryset = Job.objects.all()
	serializer_class = CrawlerProfileSerializer


# 根据工作名称、工作地点、公司名称、经验要求、学历要求进行筛选
class GetCrawlerProfileByKeyboardView(ListAPIView, GenericViewSet):

	"""Filter crawled Job records by keyword via CrawlerByKeyboardFilter."""
	queryset = Job.objects.all()
	serializer_class = CrawlerProfileSerializer
	filter_backends = (DjangoFilterBackend,)
	# NOTE(review): `filter_class` was renamed `filterset_class` in
	# django-filter 2.0; confirm the installed version still honours it.
	filter_class = CrawlerByKeyboardFilter