# from cmd import PROMPT
from encodings import utf_8
import pdfplumber
import re
import requests
from enum import Enum
from bs4 import BeautifulSoup
import os
# import fitz
# Module-level buffer for the latest dblp warning text; overwritten by
# pdfreference.downloadbib via `global prompt` when a search is ambiguous.
prompt = "prompt message"
class download_states(Enum):
    """Download state of a reference's bib file (held in pdfreference.states)."""
    not_found = 0          # dblp reported "no matches" for the search
    not_download = 1       # initial state: download not attempted yet
    download_success = 2   # bib file written under bib/
    waiting = 3            # download started (left here if a warning halts it)
class pdfreference:
    '''
    Holds the parsed fields of one reference extracted from a pdf file,
    together with the download state of its bib entry.
    '''
    def __init__(self, author='', title='', journal='', pagenumber='', date='', doi='', link=''):
        # author holds a list of name strings once parsed (see clear() and
        # convert_reflist_to_class); the '' default is the "unset" marker.
        self.content = {
            "author": author,
            "title": title,
            "journal": journal,
            "pagenumber": pagenumber,
            "date": date,
            "doi": doi,
            "link": link,
        }
        self.states = download_states.not_download

    def downloadbib(self):
        '''
        Search dblp.org for this reference and save its bib entry as
        bib/<title>.bib.

        Updates self.states to reflect the outcome; on an ambiguous search
        dblp's warning text is stored in the module global `prompt` and the
        state stays `waiting`.
        '''
        self.states = download_states.waiting
        url = "https://dblp.org"
        # build the search query: "author1,author2, title, year"
        author_temp = ",".join(self.content["author"])
        if self.content["date"] != "":
            date_year = re.search('([0-9]+)', self.content["date"]).group(0)
        else:
            date_year = ""
        search_content = author_temp + ", " + self.content["title"] + ", " + date_year
        data = {
            "q": search_content
        }
        response = requests.post(url, params=data)
        bs4 = BeautifulSoup(response.content, 'lxml')
        search_result = bs4.find(id="completesearch-publs")
        if search_result is None:
            # request failed or page layout changed -- treat as not found
            # instead of raising AttributeError below
            self.states = download_states.not_found
            return
        if search_result.find(id='completesearch-info-matches').text == 'no matches':
            self.states = download_states.not_found
            return
        if search_result.find('p', class_="warning"):
            # ambiguous search: surface dblp's warning to the caller
            global prompt
            prompt = search_result.find('p', class_="warning").text
            return
        # first result -> its export menu -> the bib download link
        download_url = search_result.find(class_='publ-list').find(class_="publ").select("nav>ul>li:nth-of-type(2)")[0].find(class_="head").a["href"]
        download_bib = BeautifulSoup(requests.post(download_url).content, 'lxml')
        bibfile = requests.get(download_bib.find(id='main').select("div>p>a")[0]["href"])
        # was: os.makedir (no such function -> AttributeError) with a
        # hard-coded Windows '\\bib\\' path; makedirs is portable
        os.makedirs("bib", exist_ok=True)
        # NOTE(review): the title may contain characters that are invalid in
        # filenames -- consider sanitising it before use.
        with open('bib/' + self.content["title"] + '.bib', 'w', encoding='utf_8') as fp:
            # was: fp.write(bibfile), which passed the Response object
            # itself (TypeError); write the decoded body instead
            fp.write(bibfile.text)
        self.states = download_states.download_success
        return

    def openbib(self):
        '''
        Open this reference's downloaded bib file in notepad, if present.
        (Windows-only; silently does nothing when the file is missing.)
        '''
        # was: path lacked the '.bib' suffix that downloadbib appends, so
        # the file was never found
        path = 'bib/' + self.content["title"] + '.bib'
        if os.path.exists(path):
            # quote the path: titles routinely contain spaces
            os.system('notepad "%s"' % (path))

    def clear(self):
        '''Reset every field and the download state to their initial values.'''
        self.content["author"] = []
        self.content["title"] = ''
        self.content["journal"] = ''
        self.content["pagenumber"] = ''
        self.content["date"] = ''
        self.content["doi"] = ''
        self.content["link"] = ''
        self.states = download_states.not_download
        return

    def __str__(self):
        return 'author: %s\ntitle: %s\njournal: %s\npagenumber: %s\ndate: %s\ndoi: %s\nlink: %s' \
        % (self.content["author"], self.content["title"], self.content["journal"],
           self.content["pagenumber"], self.content["date"], self.content["doi"],
           self.content["link"])


def get_page_txt(page):
    '''
    extract the text of a page of the pdf file, reading the left column
    first and then the right column

    Assumes a two-column layout split at the horizontal midpoint --
    TODO(review): confirm this holds for every input pdf.

    Arguments:
    page -- pdfplumber.page.Page, a page of the pdf file

    Return:
    text -- str, the concatenated text of the left and right page halves
    '''
    # was: the accumulator was named `str`, shadowing the builtin
    mid = 0.5 * float(page.width)
    page_left = page.crop((page.bbox[0], page.bbox[1], mid, page.bbox[3]))
    page_right = page.crop((mid, page.bbox[1], page.width, page.bbox[3]))
    return page_left.extract_text() + page_right.extract_text()
def get_ref_pages(pdfname):
    '''
    get the text of the pages containing the references of the pdf file

    Scans backwards from the last page for one whose text contains
    "REFERENCE" or "References", then concatenates that page and every
    page after it. Page 0 is never tested, so a paper whose references
    start on the first page falls back to returning the whole document.

    Arguments:
    pdfname -- str, the path of the pdf file

    Return:
    ref_info_txt -- str, the text of the reference pages
    '''
    with pdfplumber.open(pdfname) as pdf:
        total = len(pdf.pages)
        start = total - 1
        # walk backwards until a page mentions the references heading
        while start > 0:
            page_text = get_page_txt(pdf.pages[start])
            if "REFERENCE" in page_text or "References" in page_text:
                break
            start -= 1
        return "".join(get_page_txt(pdf.pages[n]) for n in range(start, total))

def get_ref_list(references):
    '''
    convert the string of references to a list, one string per reference

    Arguments:
    references -- str, the text of the pages containing the references

    Return:
    ref_list -- list[str], each element is the text of one reference
    '''
    # undo end-of-line hyphenation, then flatten everything to one line
    flat = references.replace("-\n", "").replace("\n", " ")
    # a reference starts with one or more "[n] " tags and runs up to a
    # number followed by ".", optionally trailed by an online pdf link
    # and/or a doi
    ref_pattern = re.compile(r'((\[[0-9]+\] )+.*?[0-9]+\.( \[Online\].*?\.pdf\.?)?( doi.*?\. )?)')
    # findall yields a tuple per match; the full reference is group 0
    return [groups[0] for groups in ref_pattern.findall(flat)]
def convert_reflist_to_class(ref_list):
    '''
    convert the strings of references to the class pdfreference defined above

    Fields that cannot be matched keep the pdfreference defaults. Matched
    author/title/doi/pagenumber text is removed from the working string so
    later, looser patterns cannot re-match it; journal, date and link
    matches are left in place (original behaviour).

    Arguments:
    ref_list -- list[str], each element is the text of one reference

    Return:
    references -- list[pdfreference], one parsed object per input string
    '''
    # compile each field pattern once instead of on every loop iteration
    # three-or-more authors: "A. B, C. D, and E. F" right after the "[n] " tag
    pattern_author_more = re.compile(r'((?<=\] )(([A-Z][A-Za-z\.]+ ?)+, )+([A-Z][A-Za-z\.]+ ?)+, and ([A-Z][A-Za-z\.]+ ?)+)')
    # one or two authors: "A. B" or "A. B and C. D"
    pattern_author_1_2 = re.compile(r'((?<=\] )[A-Z][A-Za-z \.]+)')
    pattern_title_quote = re.compile(r'(“[^“”]*”)')
    pattern_title_noquote = re.compile(r'((?<=\] , )[A-Z][A-Za-z :-]+)')
    pattern_journal = re.compile(r'[A-Z][A-Za-z ]+.*?\,')
    pattern_pagenumber = re.compile(r'((?<=pp.) *[0-9-–]+(?=[,.]))')
    pattern_date = re.compile(r'((?<=, )|(?<=,  )|(?<=,))(((((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|(Nov)|(Dec))[a-z]*\.?)? *)?\d{4}(?=[,.]))')
    pattern_doi = re.compile(r'(doi: +\S+ ?\S+\d(?=.))')
    pattern_link = re.compile(r'(http[s]?:.*[A-Za-z\/])')

    references = []
    for content in ref_list:
        ref_class = pdfreference()

        # authors: prefer the many-author form, fall back to one/two authors
        match = pattern_author_more.search(content)
        if match:
            ref_class.content["author"] = re.split(", and |, ", match.group(0))
            content = pattern_author_more.sub("", content)
        else:
            match = pattern_author_1_2.search(content)
            if match:
                ref_class.content["author"] = re.split(" and ", match.group(0))
                content = pattern_author_1_2.sub("", content)

        # title: quoted form preferred; strip quotes and commas from it
        match = pattern_title_quote.search(content)
        if match:
            ref_class.content["title"] = re.sub('[,“”]', '', match.group(0))
            content = pattern_title_quote.sub("", content)
        else:
            match = pattern_title_noquote.search(content)
            if match:
                ref_class.content["title"] = match.group(0)
                content = pattern_title_noquote.sub("", content)

        match = pattern_doi.search(content)
        if match:
            # keep only the identifier: drop the "doi:" label and spaces
            ref_class.content["doi"] = re.sub(r' |doi: +', '', match.group(0))
            content = pattern_doi.sub("", content)

        match = pattern_journal.search(content)
        if match:
            ref_class.content["journal"] = re.sub(r',', '', match.group(0))

        match = pattern_pagenumber.search(content)
        if match:
            ref_class.content["pagenumber"] = match.group(0)
            content = pattern_pagenumber.sub("", content)

        match = pattern_date.search(content)
        if match:
            ref_class.content["date"] = match.group(0)

        match = pattern_link.search(content)
        if match:
            ref_class.content["link"] = re.sub(r' ', '', match.group(0))

        references.append(ref_class)
    return references

if __name__ == '__main__':
    # target paper; swap in one of the commented paths to process another pdf
    pdf_path = "pdf/Next-Generation_Healthcare_Enabling_Technologies_for_Emerging_Bioelectromagnetics_Applications.pdf"
    # pdf_path = "pdf/Ten_Fundamental_Antenna-Theory_Puzzles_Solved_by_the_Antenna_Equation_A_remarkable_array_of_solutions.pdf"
    # pdf_path = "pdf/Tracking_Complete_Deformable_Objects_with_Finite_Elements.pdf"

    # extract the raw reference text, split it into individual reference
    # strings, then parse each into a pdfreference object
    ref_info = get_ref_pages(pdf_path)
    ref_list = get_ref_list(ref_info)
    # keep a dump of the raw reference text for debugging
    with open("ref_info.txt", "w", encoding='utf_8') as fp:
        fp.write(str(ref_info))
    # was: an unused `ref_class = pdfreference()` was constructed here
    references = convert_reflist_to_class(ref_list)