from django.shortcuts import render
from django.http import HttpResponse ,JsonResponse,FileResponse
import requests
from bs4 import BeautifulSoup
import utils.mylogger as log
import utils.spider as sp
from django.views.decorators.csrf import csrf_exempt
import utils.dbUtil as db
from django.forms.models import model_to_dict
import json
from pathlib import Path
import os
import configparser

# Create your views here.
# Module-level logger instance shared by every view in this file.
myLogger = log.Mylogger()


def getFilter(filter: str) -> "sp.TagFilter | None":
    """Parse an HTML fragment into a ``sp.TagFilter``.

    Args:
        filter: An HTML snippet whose tags (and their ``class`` attributes)
            define what to match. Blank/whitespace-only means "no filter".

    Returns:
        A ``sp.TagFilter`` built from every tag found in the fragment, or
        ``None`` when *filter* is blank (the original annotation omitted
        the ``None`` case).
    """
    if filter.strip() == "":
        return None
    filter_soup = BeautifulSoup(filter, "html.parser")
    # Hoisted: the original called find_all() twice for the same result.
    found_tags = filter_soup.find_all(recursive=True)
    filter_tags = [tag.name for tag in found_tags]
    # Tags without a class attribute contribute '' so both lists stay
    # index-aligned with each other.
    filter_classes = [tag.get('class', '') for tag in found_tags]
    myLogger.debug(filter_tags)
    myLogger.debug(filter_classes)
    return sp.TagFilter(filter_tags, filter_classes)


def isValid(request):
    """Report whether the ``url`` query parameter is a valid/reachable URL.

    Responds with JSON of the form ``{"isValid": <bool>}``. A missing or
    empty ``url`` parameter is reported as invalid without any network call.
    """
    target = request.GET.get('url', '')
    if target == "":
        valid = False
    else:
        valid = sp.Spider(target, None).isUrlVailid(target)
    return JsonResponse({"isValid": valid})

@csrf_exempt
def index(request):
    """Crawl a page and return the extracted content as JSON.

    POST params:
        url: Page to crawl (defaults to https://www.douban.com/).
        filter: HTML snippet describing which tags/classes to extract;
            when blank, the whole page's text is returned under "content".
        encode: Page character-encoding override ("" lets the spider decide).
        surplus: HTML snippet describing tags/classes to strip first.
    """
    results = {}

    url = request.POST.get('url', 'https://www.douban.com/')
    myLogger.debug("url is " + url)

    filter = request.POST.get('filter', "")
    myLogger.debug("filter is " + filter)

    # BUG FIX: 'encode' was fetched and logged twice in the original;
    # read it once.
    encode = request.POST.get('encode', "")
    myLogger.debug("encode is " + encode)

    surplus = request.POST.get('surplus', "")
    myLogger.debug("surplus is " + surplus)

    # TODO: validate the filter expression's format before using it.
    if filter.strip() != "":
        spider = sp.Spider(url, encode)
        results = spider.executeWithFilter(getFilter(filter), getFilter(surplus))
        # TODO: persist the results.
        # with db.Mongo() as mongo:
        #     myLogger.debug("=========== db inserting ===========")
        #     mongo.addOne(results)
    else:
        results["content"] = sp.Spider(url, encode).execute(url, getFilter(surplus)).get_text()
    return JsonResponse(results, json_dumps_params={'ensure_ascii': False})

@csrf_exempt
def deepDig(request):
    """Two-level crawl: fetch ``url``, follow second-level links matched by
    ``secondPagReg``, extract title/content by CSS class, and return JSON.
    """
    # Page URL.
    url = request.POST.get('url', 'https://www.douban.com/')
    myLogger.debug("url is " + url)
    # Page encoding.
    encode = request.POST.get('encode', "")
    myLogger.debug("encode is" + encode)
    # Comma-separated patterns identifying second-level page links.
    secondPagReg = request.POST.get('secondPagReg', "")
    myLogger.debug(secondPagReg.split(','))
    # CSS classes of the article content on second-level pages.
    content_class = request.POST.get('content_class', "")
    myLogger.debug(content_class.split(','))
    # CSS classes of the title on second-level pages.
    title_class = request.POST.get('title_class', "")
    myLogger.debug(title_class.split(','))
    # Extra tags to strip before extraction.
    surplus = request.POST.get('surplus', "")
    myLogger.debug("surplus is " + surplus)
    # Domain prefix used to resolve relative links on the page.
    preUrl = request.POST.get('preUrl', "")
    myLogger.debug("preUrl is " + preUrl)

    spider = sp.Spider(
        url,
        encode=encode,
        secondPagReg=secondPagReg.split(','),
        secondPageClasses=content_class.split(','),
        titleClasses=title_class.split(","),
    )
    prefix = preUrl.strip()
    if prefix == "":
        results = spider.deepExecute(getFilter(surplus))
    else:
        results = spider.deepExecute(getFilter(surplus), prefix)

    return JsonResponse(results, json_dumps_params={'ensure_ascii': False})

@csrf_exempt
def saveDB(request):
    """Persist a batch of crawled articles to MongoDB.

    Expects a JSON request body of the form ``{"articles": [...]}``.
    Responds ``{"result": 0}`` on success and ``{"result": -1}`` on a
    malformed body or a failed insert.
    """
    try:
        data = json.loads(request.body)['articles']
    except (ValueError, KeyError):
        # ROBUSTNESS FIX: a malformed JSON body or a missing 'articles'
        # key previously raised an unhandled 500; report failure using
        # this view's own convention instead.
        myLogger.debug("saveDB: invalid request body")
        return JsonResponse({'result': -1}, json_dumps_params={'ensure_ascii': False})
    myLogger.debug(data)
    flag = False
    with db.Mongo() as mongo:
        myLogger.debug("=========== db inserting ===========")
        flag = mongo.addMany(data)
    results = {'result': 0} if flag else {'result': -1}
    return JsonResponse(results, json_dumps_params={'ensure_ascii': False})


def downloadFile(request):
    """Serve the generated ``asset/news.cfg`` as a binary attachment."""
    cfg_path = Path(__file__).resolve().parent / "asset" / "news.cfg"
    # FileResponse takes ownership of the handle and closes it once the
    # response body has been streamed.
    response = FileResponse(cfg_path.open('rb'))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = "attachment;filename=news.cfg"
    return response

@csrf_exempt
def change_template(request):
    """Fill the config template with the POSTed crawl settings and write the
    result to ``asset/news.cfg``.

    POST params mirror ``deepDig`` plus ``pro_name`` (the project name).
    Always responds with ``{"result": "OK"}`` as JSON.
    """
    # Project name.
    pro_name = request.POST.get('pro_name', 'YOUR_PROJECT_NAME')
    myLogger.debug("项目名字 is " + pro_name)
    # Page URL.
    url = request.POST.get('url', 'https://www.douban.com/')
    myLogger.debug("url is " + url)
    # Page encoding.
    encode = request.POST.get('encode', "")
    myLogger.debug("encode is" + encode)
    # Comma-separated patterns identifying second-level page links.
    secondPagReg = request.POST.get('secondPagReg', "")
    myLogger.debug(secondPagReg.split(','))
    # CSS classes of the article content on second-level pages.
    content_class = request.POST.get('content_class', "")
    myLogger.debug(content_class.split(','))
    # CSS classes of the title on second-level pages.
    title_class = request.POST.get('title_class', "")
    myLogger.debug(title_class.split(','))
    # Extra tags to filter out.
    surplus = request.POST.get('surplus', "")
    myLogger.debug("surplus is " + surplus)
    # Domain prefix used to resolve relative links.
    preUrl = request.POST.get('preUrl', "")
    myLogger.debug("preUrl is " + preUrl)

    config_parser = configparser.ConfigParser()
    asset_dir = Path(__file__).resolve().parent / "asset"
    config_parser.read(os.path.join(asset_dir, "template.cfg"), encoding='utf-8')

    # Map the encoding name to the template's numeric code; any other
    # value (including "") leaves the template's default untouched.
    encoding_codes = {"GB2312": '0', "UTF-8": '1', "None": '2'}
    if encode in encoding_codes:
        config_parser.set("news_config", "encoding", encoding_codes[encode])
    config_parser.set("news_config", "project_name", pro_name)
    config_parser.set("news_config", "url", url)
    config_parser.set("news_config", "filter", surplus)
    config_parser.set("news_config", "news_type", secondPagReg)
    config_parser.set("news_config", "pre_url", preUrl)
    config_parser.set("news_config", "article", content_class)
    config_parser.set("news_config", "title", title_class)

    # BUG FIX: write with the same UTF-8 encoding the template is read
    # with; the original relied on the platform default, which can mangle
    # non-ASCII values (e.g. a Chinese project name) on some systems.
    fp = os.path.join(asset_dir, "news.cfg")
    with open(fp, 'w', encoding='utf-8') as f:
        config_parser.write(f)
    return JsonResponse({"result": "OK"}, json_dumps_params={'ensure_ascii': False})
    