import re
from urllib.request import Request, urlopen, urlretrieve
from bs4 import BeautifulSoup
import os
import threading
from entity.BasicData import BasicData
from entity.FilePath import FilePath
from utils.SqlUtil import auto_session_maker
from utils.MongoUtil import collection

# Module-level lock intended to serialize access to crawler state shared
# between worker threads.
threadLock = threading.Lock()


def getByBsObjBasicData(bsObj, url, lastIndex):
    """Build a BasicData entity from an already-parsed gallery page.

    The title is taken from the page's <h1> inside the main content div;
    the code is derived from the last two URL path segments (album dir +
    page name, without extension) with '/' flattened to '-'.
    """
    container = bsObj.find('div', {'class': 'list_con bgff'})
    title = container.find('h1').getText()
    codeStart = url[: lastIndex - 1].rfind('/') + 1
    code = url[codeStart: url.rfind('.')].replace('/', '-')
    return BasicData(title, code, url)


def saveBasicData(title, code, url, basicData=None):
    """Insert a BasicData row for ``code`` unless one already exists.

    :param title: display title of the gallery.
    :param code: unique gallery code used for the duplicate check.
    :param url: source page URL.
    :param basicData: optional pre-built BasicData entity to persist
        instead of constructing a new one.
    :return: the id of the existing or newly inserted row.
    """
    with auto_session_maker() as session:
        byOne = session.query(BasicData).filter(BasicData.code == code).first()
        if byOne:
            # Fix: the original called session.close() here even though the
            # ``with`` context manager still owns the session — redundant
            # double-close that also detached ``byOne`` early.
            return byOne.id
        if basicData is None:
            basicData = BasicData(title, code, url)
        session.add(basicData)
        session.commit()
        session.refresh(basicData)
        # Detach so the generated id stays readable after the session closes.
        session.expunge(basicData)
        return basicData.id


def getBody(req):
    """Fetch ``req`` (a urllib Request or URL string) and return the body
    as text.

    Retries forever on ordinary fetch errors (best-effort crawling);
    decodes as UTF-8 first and falls back to GBK with undecodable bytes
    ignored, matching the mixed encodings seen on the target site.
    """
    byte = None
    while True:
        try:
            byte = urlopen(req).read()
            break
        except Exception as e:
            # Fix: was ``except BaseException``, which also swallowed
            # KeyboardInterrupt/SystemExit and made the retry loop
            # impossible to interrupt.
            print(e)
    try:
        return byte.decode('utf-8')
    except UnicodeDecodeError as e:
        print(e)
        return byte.decode('gbk', "ignore")


def getBsObj(url):
    """Return a BeautifulSoup tree for ``url``, caching raw bodies in Mongo.

    The body is looked up in ``collection`` keyed by URL and is only
    fetched over HTTP (with a browser User-Agent) on a cache miss, so
    repeated crawls never hit the site twice for the same page.
    """
    print(url)
    cached = collection.find_one({'_id': url})
    if cached is None:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        body = getBody(Request(url, headers=headers))
        cached = {'_id': url, 'body': body}
        collection.insert_one(cached)
    return BeautifulSoup(cached['body'], features="html.parser")


class PictureHouse:
    """Multi-threaded crawler for a picture site: walks list pages and
    gallery pages, persists metadata via SQLAlchemy and downloads images.

    Shared crawl state (``urls`` work queue and ``existUrls`` seen-set) is
    guarded by the module-level ``threadLock`` — the original created that
    lock but never acquired it, so concurrent workers could race on
    ``pop(0)`` (IndexError) or enqueue duplicates.
    """

    # Characters illegal in Windows file names; blanked out of titles
    # before the title is used as a directory name.  (The original listed
    # '?' twice — the duplicate was a no-op and is removed.)
    char_list = ['*', '|', ':', '?', '/', '<', '>', '"', '\\']

    def __init__(self, filePath, baseUrl, startUrl=None):
        # Root directory on disk where downloaded images are stored.
        self.filePath = filePath
        # Site root, used to absolutize relative hrefs.
        self.baseUrl = baseUrl
        # First page to crawl; defaults to the site root.
        self.startUrl = startUrl or baseUrl
        # FIFO work queue of URLs still to visit (guarded by threadLock).
        self.urls = list()
        # URLs already enqueued, to avoid re-crawling (guarded by threadLock).
        self.existUrls = set()

    def run(self):
        """Worker loop: pop URLs until the queue is empty and dispatch each
        to the list-page or image-page handler."""
        while True:
            # Fix: check-and-pop atomically under the lock.  The original
            # unguarded ``while len(self.urls)`` + ``pop(0)`` could raise
            # IndexError when two threads raced for the last element.
            with threadLock:
                if not self.urls:
                    break
                url = self.urls.pop(0)
            self.getUrl(url)
            if re.match(r'.*list.*\.html', url):
                self.loadListPage(url)
            elif re.match(r'.*/[0-9]+/[0-9_]+\.html', url):
                self.loadImagePage(url)

    def start(self, threadCount=4):
        """Seed the queue from ``startUrl`` and spawn ``threadCount``
        worker threads.

        :return: the started Thread objects so callers may join them
            (the original built this list but discarded it; returning it
            is backward compatible).
        """
        self.getUrl(self.startUrl)
        threads = []
        for _ in range(threadCount):
            thread = threading.Thread(target=self.run)
            thread.start()
            threads.append(thread)
        return threads

    def getUrl(self, url, baseUrl=None, regular='^(/meinv|https.*/meinv).*'):
        """Fetch ``url``, extract every <a href> matching ``regular`` and
        enqueue unseen URLs (relative hrefs are absolutized against
        ``baseUrl``)."""
        bsObj = getBsObj(url)
        aLabels = bsObj.findAll('a', href=re.compile(regular))
        baseUrl = baseUrl or self.baseUrl
        if not aLabels:
            return
        for item in aLabels:
            href = item['href']
            if not href.startswith('http'):
                href = baseUrl + href
            # Fix: de-duplicate under the lock so two threads cannot
            # enqueue the same URL concurrently.
            with threadLock:
                if href not in self.existUrls:
                    self.urls.append(href)
                    self.existUrls.add(href)

    def saveFilePath(self, basicDataId, title, bsObj, code, index):
        """Download one image of a gallery page and record its path,
        skipping images already recorded for ``basicDataId``.

        NOTE(review): the FilePath row is only ``session.add``-ed, never
        explicitly committed here — this relies on ``auto_session_maker``
        committing on context exit; confirm against its implementation.
        """
        imgUrl, imgPath = self.getImgPath(bsObj, code, title, index)
        with auto_session_maker() as session:
            if imgUrl is None or imgPath is None:
                return
            if session.query(FilePath).filter(FilePath.basicDataId == basicDataId).filter(
                    FilePath.path == imgPath).first():
                return
            # Retry the download until it succeeds.  Fix: catch Exception,
            # not BaseException (which trapped KeyboardInterrupt too), and
            # drop the redundant trailing ``continue``.
            while True:
                try:
                    urlretrieve(imgUrl, os.path.join(self.filePath, imgPath))
                    break
                except Exception as e:
                    print(e)
            session.add(FilePath(basicDataId, title, imgPath))

    def getImgPath(self, bsObj, code, title, index):
        """Work out the remote image URL and the local relative save path
        for picture ``index`` of gallery ``code``/``title``.

        :return: (imgUrl, imgPath), or (None, None) when the page carries
            no recognizable big-picture element.
        """
        savePath = code + '/' + title + '/'
        # Fix: exist_ok avoids the exists()/makedirs() race between
        # worker threads downloading into the same gallery directory.
        os.makedirs(os.path.join(self.filePath, savePath), exist_ok=True)
        try:
            imgUrl = (bsObj.find('img', {'id': 'bigpicimg'}) or bsObj.find('div', {'id': 'bigpic'}).find('img'))['src']
        except Exception as e:
            print(e)
            return None, None
        # Keep the remote image's original file extension.
        imgPath = os.path.join(savePath, str(index)) + imgUrl[imgUrl.rfind('.'):]
        return imgUrl, imgPath

    def loadListPage(self, url):
        """Expand a paginated list page into its per-page URLs and crawl
        the links on each."""
        lastIndex = url.rfind('/') + 1
        prefixUrl = url[: lastIndex]
        suffix = url[lastIndex: url.rfind('.')]
        bsObj = getBsObj(url)
        pages = bsObj.find('div', {'class': 'pages'})
        if not pages:
            return
        # Page-count text looks like "共N页…"; pull N out of it.
        pageInfo = pages.find('span', {'class': 'pageinfo'}).getText().replace(' ', '')
        pageSize = int(re.findall(r'\d+', re.findall(r'^共\d+页', pageInfo)[0])[0])
        self.getUrl(url)
        for i in range(2, pageSize + 1):
            self.getUrl(prefixUrl + suffix + '_' + str(i) + '.html')

    def loadImagePage(self, url):
        """Parse one gallery page: persist its metadata and download every
        picture across all of its sub-pages."""
        lastIndex = url.rfind('/') + 1
        prefixUrl = url[: lastIndex]
        suffix = url[lastIndex: url.rfind('.')]
        bsObj = getBsObj(url)
        content = bsObj.find('div', {'class': 'list_con bgff'})
        if content is None:
            return
        # Sanitize the title so it is a valid directory name.
        title = content.find('h1').getText()
        for char in self.char_list:
            title = title.replace(char, ' ')
        # Gallery code: last two URL path segments joined with '-'.
        code = url[url[: lastIndex - 1].rfind('/') + 1: url.rfind('.')].replace('/', '-')
        pages = content.find('div', {'class': 'pages'})
        pageSize = 1
        if pages:
            pages = pages.find('ul')
            try:
                pageSize = int(re.findall(r'\d+', re.findall(r'共\d+页', pages.find('li').getText())[0])[0])
            except Exception as e:
                # Best-effort: fall back to a single page when the count
                # cannot be parsed.
                print(e)
        basicDataId = saveBasicData(title, code, url)
        self.saveFilePath(basicDataId, title, bsObj, code, 1)
        for i in range(2, pageSize + 1):
            itemUrl = prefixUrl + suffix + '_' + str(i) + '.html'
            self.saveFilePath(basicDataId, title, getBsObj(itemUrl), code, i)
