# -*- coding: utf-8 -*-
import base64
from binascii import hexlify
import gzip
import http.cookiejar
from io import BytesIO, StringIO
import os
import re
from shutil import move
import time
import unicodedata
from urllib.error import HTTPError, URLError
from urllib.parse import quote_plus, unquote, urlencode, urljoin, urlparse, urlunparse
from urllib.request import addinfourl, BaseHandler, build_opener, HTTPCookieProcessor, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, ProxyHandler, Request, urlopen

from Components.config import config, configfile, ConfigText

from Plugins.Extensions.IPTVPlayer.components.asynccall import IsMainThread, IsThreadTerminated, SetThreadKillable
from Plugins.Extensions.IPTVPlayer.components.iptvplayerinit import GetIPTVNotify, TranslateTXT as _
from Plugins.Extensions.IPTVPlayer.libs import ph
from Plugins.Extensions.IPTVPlayer.libs.e2ijson import loads as json_loads
from Plugins.Extensions.IPTVPlayer.p2p3.manipulateStrings import ensure_binary, ensure_str, strDecode, iterDictItems
from Plugins.Extensions.IPTVPlayer.tools.iptvtools import GetDefaultLang, iptv_system, IsExecutable, IsHttpsCertValidationEnabled, printDBG, printExc, rm, UsePyCurl
from Plugins.Extensions.IPTVPlayer.tools.iptvtypes import strwithmeta
from requests import Session as custom_Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

try:
    import ssl
except Exception:
    pass

try:
    import pycurl
except Exception:
    pass
try:
    from PIL import Image

    hasPIL = True
except ImportError:
    hasPIL = False


def DecodeGzipped(data):
    """Decompress gzip-compressed *data* (bytes) and return the decoded bytes.

    Uses the stdlib one-shot helper instead of a manual GzipFile/BytesIO
    round-trip; behavior is identical (multi-member streams included).
    """
    return gzip.decompress(data)


def EncodeGzipped(data):
    """Gzip-compress *data* (bytes) and return the compressed bytes.

    Keeps compresslevel=1 (favour speed over ratio) exactly like the
    original GzipFile-based implementation, but uses the stdlib one-shot
    helper so no intermediate BytesIO buffers are needed.
    """
    return gzip.compress(data, compresslevel=1)


class NoRedirection(HTTPRedirectHandler):
    """Redirect handler that suppresses automatic redirect following.

    Instead of re-issuing the request, every 3xx response is wrapped in an
    addinfourl carrying the original status code, so the caller can inspect
    the redirect itself (e.g. read the Location header).
    """

    def http_error_302(self, req, fp, code, msg, headers):
        response = addinfourl(fp, headers, req.get_full_url())
        response.code = code  # expose the 3xx status to the caller
        return response

    # all other redirect statuses are handled the same way
    http_error_300 = http_error_302
    http_error_301 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302


class MultipartPostHandler(BaseHandler):
    """urllib handler that encodes sequence-of-pairs POST data as multipart/form-data.

    Registered before the default HTTP handler so it can rewrite the request
    payload first. str/bytes payloads are passed through untouched.
    """
    handler_order = HTTPHandler.handler_order - 10

    def http_request(self, request):
        # FIX: Request.get_data()/add_data() are Python 2 APIs removed in
        # Python 3 - use the .data attribute instead. Also skip bytes
        # payloads (already-encoded bodies), not only str.
        data = request.data
        if data is not None and not isinstance(data, (str, bytes)):
            content_type, body = self.encode_multipart_formdata(data)
            request.add_unredirected_header('Content-Type', content_type)
            # urlopen requires a bytes body in Python 3
            request.data = body.encode('utf-8')
        return request

    def encode_multipart_formdata(self, fields):
        """Encode an iterable of (name, value) pairs as a multipart body.

        Returns (content_type, body) where body is a str using a fixed
        boundary (kept from the original implementation).
        """
        LIMIT = '-----------------------------14312495924498'
        CRLF = '\r\n'
        L = []
        for (key, value) in fields:
            L.append('--' + LIMIT)
            L.append('Content-Disposition: form-data; name="%s"' % key)
            L.append('')
            L.append(value)
        L.append('--' + LIMIT + '--')
        L.append('')
        body = CRLF.join(L)
        content_type = 'multipart/form-data; boundary=%s' % LIMIT
        return content_type, body

    https_request = http_request


class CParsingHelper:
    """Static helpers for scraping text/HTML used by the host scripts.

    Most marker/node helpers are thin wrappers around the ``ph`` parsing
    module; the historical method names (including the "Beetwen" typo and
    parameter names) are kept because they are used all over the hosts.
    """

    @staticmethod
    def listToDir(cList, idx):
        """Recursively fold a flat list of HTML fragments into a tree.

        Fragments starting with '<ul'/'<li' open a nested level and
        '</ul>'/'</li>' close one; everything else is concatenated into the
        current node's text.

        Returns (tree, nextIdx, depthDelta); every tree node is a dict with
        'dat' (text) and an optional 'list' of child nodes.
        """
        cTree = {'dat': ''}
        deep = 0
        while (idx + 1) < len(cList):
            if cList[idx].startswith('<ul') or cList[idx].startswith('<li'):
                deep += 1
                nTree, idx, nDeep = CParsingHelper.listToDir(cList, idx + 1)
                if 'list' not in cTree:
                    cTree['list'] = []
                cTree['list'].append(nTree)
                deep += nDeep
            elif cList[idx].startswith('</ul>') or cList[idx].startswith('</li>'):
                deep -= 1
                idx += 1
            else:
                cTree['dat'] += cList[idx]
                idx += 1
            if deep < 0:
                # more tags closed than opened -> hand control back to caller
                break
        return cTree, idx, deep

    @staticmethod
    def getSearchGroups(data, pattern, grupsNum=1, ignoreCase=False):
        """Return the first *grupsNum* regex groups of *pattern* found in *data*."""
        return ph.search(data, pattern, ph.IGNORECASE if ignoreCase else 0, grupsNum)

    @staticmethod
    def getDataBeetwenReMarkers(data, pattern1, pattern2, withMarkers=True):
        """Return (True, text) between two compiled regex markers, (False, '') when absent."""
        match1 = pattern1.search(data)
        if None is match1 or -1 == match1.start(0):
            return False, ''
        match2 = pattern2.search(data[match1.end(0):])
        if None is match2 or -1 == match2.start(0):
            return False, ''

        if withMarkers:
            return True, data[match1.start(0): (match1.end(0) + match2.end(0))]
        else:
            return True, data[match1.end(0): (match1.end(0) + match2.start(0))]

    @staticmethod
    def getDataBeetwenMarkers(data, marker1, marker2, withMarkers=True, caseSensitive=True):
        """First occurrence of text between two string markers (ph.find wrapper)."""
        flags = 0
        if withMarkers:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.find(data, marker1, marker2, flags)

    @staticmethod
    def getAllItemsBeetwenMarkers(data, marker1, marker2, withMarkers=True, caseSensitive=True):
        """All occurrences of text between two string markers (ph.findall wrapper)."""
        flags = 0
        if withMarkers:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.findall(data, marker1, marker2, flags)

    @staticmethod
    def rgetAllItemsBeetwenMarkers(data, marker1, marker2, withMarkers=True, caseSensitive=True):
        """All occurrences between markers, searching from the end (ph.rfindall wrapper)."""
        flags = 0
        if withMarkers:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.rfindall(data, marker1, marker2, flags)

    @staticmethod
    def rgetDataBeetwenMarkers2(data, marker1, marker2, withMarkers=True, caseSensitive=True):
        """Last occurrence of text between markers (ph.rfind wrapper)."""
        flags = 0
        if withMarkers:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.rfind(data, marker1, marker2, flags)

    @staticmethod
    def rgetDataBeetwenMarkers(data, marker1, marker2, withMarkers=True):
        # This method does not work as one would expect (it anchors on the
        # LAST marker1 in the whole string, then looks forward for marker2),
        # but it is used in many places, so the behavior is kept as-is.
        # Prefer rgetDataBeetwenMarkers2 for new code.
        idx1 = data.rfind(marker1)
        if -1 == idx1:
            return False, ''
        idx2 = data.rfind(marker2, idx1 + len(marker1))
        if -1 == idx2:
            return False, ''
        if withMarkers:
            idx2 = idx2 + len(marker2)
        else:
            idx1 = idx1 + len(marker1)
        return True, data[idx1:idx2]

    @staticmethod
    def getDataBeetwenNodes(data, node1, node2, withNodes=True, caseSensitive=True):
        """First occurrence of text between two node specs (ph.find wrapper)."""
        flags = 0
        if withNodes:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.find(data, node1, node2, flags)

    @staticmethod
    def getAllItemsBeetwenNodes(data, node1, node2, withNodes=True, numNodes=-1, caseSensitive=True):
        """All occurrences of text between two node specs, optionally limited."""
        flags = 0
        if withNodes:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.findall(data, node1, node2, flags, limits=numNodes)

    @staticmethod
    def rgetDataBeetwenNodes(data, node1, node2, withNodes=True, caseSensitive=True):
        """Last occurrence of text between two node specs (ph.rfind wrapper)."""
        flags = 0
        if withNodes:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.rfind(data, node1, node2, flags)

    @staticmethod
    def rgetAllItemsBeetwenNodes(data, node1, node2, withNodes=True, numNodes=-1, caseSensitive=True):
        """All occurrences between node specs, searched from the end, optionally limited."""
        flags = 0
        if withNodes:
            flags |= ph.START_E | ph.END_E
        if not caseSensitive:
            flags |= ph.IGNORECASE
        return ph.rfindall(data, node1, node2, flags, limits=numNodes)

    # this method is useful only for developers
    # to dump page code to the file
    @staticmethod
    def writeToFile(file, data, mode="w"):
        """Write *data* to *file* (developer helper, e.g. to inspect AJAX html)."""
        with open(file, mode) as text_file:
            text_file.write(data)

    @staticmethod
    def getNormalizeStr(txt, idx=None):
        """Transliterate Polish/Spanish diacritics in *txt* to plain ASCII.

        *txt* may be str or UTF-8 bytes; when *idx* is given only txt[idx]
        is processed. Non-ASCII characters without a mapping are dropped.
        """
        # FIX: the historical table mapped 'ę' to itself ('ę': 'ę'); it is
        # corrected here to the intended ASCII 'e'.
        POLISH_CHARACTERS = {'ą': 'a', 'ć': 'c', 'ę': 'e', 'ł': 'l', 'ń': 'n', 'ó': 'o', 'ś': 's', 'ż': 'z', 'ź': 'z',
                             'Ą': 'A', 'Ć': 'C', 'Ę': 'E', 'Ł': 'L', 'Ń': 'N', 'Ó': 'O', 'Ś': 'S', 'Ż': 'Z', 'Ź': 'Z',
                             'á': 'a', 'é': 'e', 'í': 'i', 'ñ': 'n', 'ú': 'u', 'ü': 'u',
                             'Á': 'A', 'É': 'E', 'Í': 'I', 'Ñ': 'N', 'Ú': 'U', 'Ü': 'U',
                             }
        if isinstance(txt, bytes):
            txt = txt.decode('utf-8')
        if None is not idx:
            txt = txt[idx]
        nrmtxt = unicodedata.normalize('NFC', txt)
        ret_str = []
        for item in nrmtxt:
            if ord(item) > 128:
                item = POLISH_CHARACTERS.get(item)
                if item:
                    ret_str.append(item)
            else:  # pure ASCII character
                ret_str.append(item)
        return ''.join(ret_str)

    @staticmethod
    def isalpha(txt, idx=None):
        """True when *txt* (or txt[idx]) is alphabetic after diacritic normalization."""
        return CParsingHelper.getNormalizeStr(txt, idx).isalpha()

    @staticmethod
    def cleanHtmlStr(str):  # NOTE: parameter name shadows the builtin, kept for API compatibility
        """Strip HTML markup from *str* (ph.clean_html wrapper)."""
        return ph.clean_html(str)


class common:
    HOST = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36'
    HEADER = None
    ph = CParsingHelper

    @staticmethod
    def getDefaultHeader(browser='firefox'):
        if browser == 'firefox':
            ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0'
        elif browser == 'iphone_3_0':
            ua = 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16'
        else:
            ua = common.HOST
        HTTP_HEADER = {'User-Agent': ua, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'DNT': 1}
        return dict(HTTP_HEADER)

    @staticmethod
    def getParamsFromUrlWithMeta(url, baseHeaderOutParams=None):
        """Extract request params from a strwithmeta url's metadata.

        Copies the HTTP headers that the download helper understands
        (Host, User-Agent, Referer, Cookie, Accept, Range) plus the proxy
        settings into an out-params dict. Returns (outParams, postData);
        postData is always None here.
        """
        from Plugins.Extensions.IPTVPlayer.iptvdm.iptvdh import DMHelper
        handledHeaders = DMHelper.HANDLED_HTTP_HEADER_PARAMS
        outParams = {}
        postData = None
        if isinstance(url, strwithmeta):
            # fill the caller-provided header dict (if any) in place
            header = baseHeaderOutParams if baseHeaderOutParams is not None else {}
            for key in url.meta:
                if key in handledHeaders:
                    header[key] = url.meta[key]
            if header:
                outParams['header'] = header
            if 'iptv_proxy_gateway' in url.meta:
                outParams['proxy_gateway'] = url.meta['iptv_proxy_gateway']
            if 'iptv_http_proxy' in url.meta:
                outParams['http_proxy'] = url.meta['iptv_http_proxy']
        return outParams, postData

    @staticmethod
    def getBaseUrl(url, domainOnly=False):
        parsed_uri = urlparse(url)
        if domainOnly:
            domain = '{uri.netloc}'.format(uri=parsed_uri)
        else:
            domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
        return domain

    @staticmethod
    def getFullUrl(url, mainUrl='http://fake/'):  # NOSONAR
        if not url:
            return ''
        if url.startswith('./'):
            url = url[1:]

        currUrl = mainUrl
        mainUrl = common.getBaseUrl(currUrl)

        if url.startswith('//'):
            proto = mainUrl.split('://', 1)[0]
            url = proto + ':' + url
        elif url.startswith('://'):
            proto = mainUrl.split('://', 1)[0]
            url = proto + url
        elif url.startswith('/'):
            url = mainUrl + url[1:]
        elif 0 < len(url) and '://' not in url:
            if currUrl == mainUrl:
                url = mainUrl + url
            else:
                url = urljoin(currUrl, url)
        return url

    @staticmethod
    def isValidUrl(url):
        return url.startswith('http://') or url.startswith('https://')

    @staticmethod
    def buildHTTPQuery(query):
        def _process(query, data, key_prefix):
            if isinstance(data, dict):
                for key, value in data.items():
                    key = '%s[%s]' % (key_prefix, key) if key_prefix else key
                    _process(query, value, key)
            elif isinstance(data, list):
                for idx in range(len(data)):
                    _process(query, data[idx], '%s[%s]' % (key_prefix, idx))
            else:
                query.append((key_prefix, data))

        _query = []
        _process(_query, query, '')
        return _query

    def __init__(self, proxyURL='', useProxy=False, useMozillaCookieJar=True):
        self.proxyURL = proxyURL
        self.useProxy = useProxy
        self.geolocation = {}
        self.meta = {}  # metadata from previus request

        self.curlSession = None
        self.pyCurlAvailable = None
        if not useMozillaCookieJar:
            raise Exception("You should stop use parameter useMozillaCookieJar it change nothing, because from only MozillaCookieJar can be used")

    def reportHttpsError(self, type, url, msg):
        """Push a translated user notification explaining an HTTPS failure.

        type -- error category (e.g. 'verify' for certificate validation
                problems); also combined with the domain to de-duplicate
                notifications. NOTE: shadows the builtin, kept for API
                compatibility.
        url  -- the https URL that failed; only its base domain is shown
        msg  -- low-level error text included in the notification
        """
        domain = self.getBaseUrl(url, True)
        messages = []
        messages.append(_('HTTPS connection error "%s"\n') % msg)
        messages.append(_('It looks like your current configuration do not allow to connect to the https://%s/.\n') % domain)

        if type == 'verify' and IsHttpsCertValidationEnabled():
            messages.append(_('You can disable HTTPS certificates validation in the E2iPlayer configuration to suppress this problem.'))
        else:
            # Probe whether a usable pycurl is installed: libcurl major
            # version >= 7 built with async DNS support - the same check
            # performed by usePyCurl().
            pyCurlInstalled = False
            try:
                verInfo = pycurl.version_info()
                printDBG("usePyCurl VERSION: %s" % [verInfo])
                pyCurlInstalled = (int(verInfo[1].split('.')[0]) >= 7) and bool(verInfo[4] & pycurl.VERSION_ASYNCHDNS)
            except Exception:
                printExc()
            if pyCurlInstalled:
                if not UsePyCurl():
                    messages.append(_('You can enable PyCurl in the E2iPlayer configuration to fix this problem.'))
            else:
                messages.append(_('You can install PyCurl package with TLS 1.3 support from the feed to fix this problem.'))
        # 'type + domain' acts as the notification id; 40/40 are display times
        GetIPTVNotify().push('\n'.join(messages), 'error', 40, type + domain, 40)

    def usePyCurl(self):
        """Return True when the pycurl transport is both enabled and usable.

        The availability probe runs once and is cached in self.pyCurlAvailable:
        libcurl major version >= 7 compiled with async DNS
        (CURL_VERSION_ASYNCHDNS, 1<<7) is required so a running request
        can be cancelled.
        """
        if not UsePyCurl():
            return False
        if self.pyCurlAvailable is None:
            try:
                verInfo = pycurl.version_info()
                printDBG("usePyCurl VERSION: %s" % [verInfo])
                self.pyCurlAvailable = (int(verInfo[1].split('.')[0]) >= 7) and bool(verInfo[4] & pycurl.VERSION_ASYNCHDNS)
            except Exception:
                self.pyCurlAvailable = False
                printExc()
        return self.pyCurlAvailable

    def getCountryCode(self, lower=True):
        if 'countryCode' not in self.geolocation:
            sts, data = self.getPage('http://ip-api.com/json')  # NOSONAR
            if sts:
                try:
                    self.geolocation['countryCode'] = json_loads(data)['countryCode']
                except Exception:
                    printExc()
        return self.geolocation.get('countryCode', '').lower()

    def _pyCurlLoadCookie(self, cookiefile, ignoreDiscard=True, ignoreExpires=False):
        cj = http.cookiejar.MozillaCookieJar()
        f = open(cookiefile)
        lines = f.readlines()
        f.close()
        for idx in range(len(lines)):
            lineNeedFix = False
            fields = lines[idx].split('\t')
            if len(fields) < 5:
                continue
            if fields[0].startswith('#HttpOnly_'):
                fields[0] = fields[0][10:]
                lineNeedFix = True
            if fields[4] == '0':
                fields[4] = ''
                lineNeedFix = True
            if lineNeedFix:
                lines[idx] = '\t'.join(fields)
        cj._really_load(StringIO(''.join(lines)), cookiefile, ignore_discard=ignoreDiscard, ignore_expires=ignoreExpires)
        return cj

    def clearCookie(self, cookiefile, leaveNames=[], removeNames=None, ignoreDiscard=True, ignoreExpires=False):
        try:
            toRemove = []
            if self.usePyCurl():
                cj = self._pyCurlLoadCookie(cookiefile, ignoreDiscard, ignoreExpires)
            else:
                cj = http.cookiejar.MozillaCookieJar()
            cj.load(cookiefile, ignore_discard=ignoreDiscard)
            for cookie in cj:
                if cookie.name not in leaveNames and (None is removeNames or cookie.name in removeNames):
                    toRemove.append(cookie)
            for cookie in toRemove:
                cj.clear(cookie.domain, cookie.path, cookie.name)
            cj.save(cookiefile, ignore_discard=ignoreDiscard)
        except Exception:
            printExc()
            return False
        return True

    def getCookieItem(self, cookiefile, item):
        cookiesDict = self.getCookieItems(cookiefile)
        return cookiesDict.get(item, '')

    def getCookie(self, cookiefile, ignoreDiscard=True, ignoreExpires=False):
        cj = None
        try:
            if self.usePyCurl():
                cj = self._pyCurlLoadCookie(cookiefile, ignoreDiscard, ignoreExpires)
            else:
                cj = http.cookiejar.MozillaCookieJar()
                cj.load(cookiefile, ignore_discard=ignoreDiscard)
        except Exception:
            printExc()
        return cj

    def getCookieItems(self, cookiefile, ignoreDiscard=True, ignoreExpires=False):
        cookiesDict = {}
        try:
            cj = self.getCookie(cookiefile, ignoreDiscard, ignoreExpires)
            for cookie in cj:
                cookiesDict[cookie.name] = cookie.value
        except Exception:
            printExc()
        return cookiesDict

    def getCookieHeader(self, cookiefile, allowedNames=[], dounquote=True, ignoreDiscard=True, ignoreExpires=False):
        ret = ''
        try:
            cookiesDict = self.getCookieItems(cookiefile, ignoreDiscard, ignoreExpires)
            for name in cookiesDict:
                if 0 < len(allowedNames) and name not in allowedNames:
                    continue
                value = cookiesDict[name]
                if dounquote:
                    value = unquote(value)
                ret += '%s=%s; ' % (name, value)
        except Exception:
            printExc()
        return ret

    def _getPageWithPyCurl(self, url, params={}, post_data=None):
        if IsMainThread():
            msg1 = _('It is not allowed to call getURLRequestData from main thread.')
            msg2 = _('You should never perform block I/O operations in the __init__.')
            GetIPTVNotify().push(r'\s'.join([msg1, msg2]), 'error', 40)
            raise Exception("Wrong usage!")

        # by default we will work in return_data mode
        if 'return_data' not in params:
            params['return_data'] = True

        if 'save_to_file' in params:  # some cleaning
            params['save_to_file'] = params['save_to_file'].replace('//', '/')

        self.meta = {}
        metadata = self.meta
        out_data = None
        sts = False

        CurrBuffer = BytesIO()
        checkFromFirstBytes = params.get('check_first_bytes', [])
        fileHandler = None
        firstAttempt = [True]
        maxDataSize = params.get('max_data_size', -1)

        responseHeaders = {}

        def _headerFunction(headerLine):
            headerLine = ensure_str(headerLine)
            if ':' not in headerLine:
                if 0 == maxDataSize:
                    if headerLine in ['\r\n', '\n']:
                        if 'n' not in responseHeaders:
                            return 0
                        responseHeaders.pop('n', None)
                    elif headerLine.startswith('HTTP/') and headerLine.split(' 30', 1)[-1][0:1] in ['1', '2', '3', '7']:  # new location with 301, 302, 303, 307
                        responseHeaders['n'] = True
                return

            name, value = headerLine.split(':', 1)

            name = name.strip()
            value = value.strip()

            name = name.lower()
            responseHeaders[name] = value

        def _breakConnection(toWriteData):
            CurrBuffer.write(toWriteData)
            if maxDataSize <= CurrBuffer.tell():
                return 0

        def _bodyFunction(toWriteData):
            # we started receiving body data so all headers are available
            # so we can check them if needed
            if firstAttempt[0]:
                firstAttempt[0] = False
                if 'check_maintype' in params and params['check_maintype'] != responseHeaders.get('content-type', '').split('/', 1)[0]:
                    printDBG('wrong maintype: %s' % responseHeaders.get('content-type', ''))
                    return 0

                if 'check_subtypes' in params:
                    contentSubType = responseHeaders.get('content-type', '').split('/', 1)[-1]
                    try:
                        valid = False
                        for subType in params['check_subtypes']:
                            if subType == contentSubType:
                                valid = True
                                break
                        if not valid:
                            printDBG('wrong type: %s' % responseHeaders.get('content-type', ''))
                            return 0
                    except Exception:
                        printExc()
                        return 0  # wrong params?

            # if we should check start body data
            if len(checkFromFirstBytes):
                CurrBuffer.write(toWriteData)
                toWriteData = None
                valid = False
                value = ensure_binary(CurrBuffer.getvalue())
                for toCheck in checkFromFirstBytes:
                    if len(toCheck) <= len(value):
                        if value.startswith(toCheck):
                            valid = True
                            # valid no need to check anymore
                            del checkFromFirstBytes[:]
                            break
                    elif toCheck.startswith(value):
                        # it could be valid - we need to wait for more data
                        valid = True
                if not valid:
                    printDBG('wrong body: %s' % hexlify(value))
                    return 0

            if fileHandler is not None and 0 == len(checkFromFirstBytes):
                # all check were done so, we can start write data to file
                try:
                    if fileHandler.tell() == 0 and CurrBuffer.tell() > 0:
                        fileHandler.write(ensure_binary(CurrBuffer.getvalue()))

                    if toWriteData is not None:
                        fileHandler.write(toWriteData)
                except Exception:
                    printExc()
                    return 0  # wrong file handle

            if toWriteData is not None and params['return_data']:
                CurrBuffer.write(toWriteData)

        def _terminateFunction(download_t, download_d, upload_t, upload_d):
            if IsThreadTerminated():
                printDBG(">> _terminateFunction")
                return True  # anything else then None will cause pycurl perform cancel

        try:
            timeout = params.get('timeout', None)

            if 'host' in params:
                host = params['host']
            else:
                host = self.HOST

            if 'header' in params:
                headers = params['header']
            elif None is not self.HEADER:
                headers = self.HEADER
            else:
                headers = {'User-Agent': host}

            if 'User-Agent' not in headers:
                headers['User-Agent'] = host

            printDBG('pCommon - getPageWithPyCurl() -> params: ' + str(params))
            printDBG('pCommon - getPageWithPyCurl() -> headers: ' + str(headers))

            if 'save_to_file' in params:
                fileHandler = open(params['save_to_file'], "wb")

            # we can not kill thread when we are in any function of pycurl
            SetThreadKillable(False)

            if None is self.curlSession:
                curlSession = pycurl.Curl()
            elif params.get('use_new_session', False):
                curlSession = self.curlSession
                self.curlSession = None
                curlSession.close()
                curlSession = pycurl.Curl()
            else:
                # use previous session to be able to reuse connection
                curlSession = self.curlSession
                self.curlSession = None
                curlSession.reset()

            if params.get('use_fresh_connect', False):
                curlSession.setopt(pycurl.FRESH_CONNECT, 1)

            customHeaders = []
            for key in headers:
                lKey = key.lower()
                if lKey == 'user-agent':
                    curlSession.setopt(pycurl.USERAGENT, headers[key])
                elif lKey == 'cookie':
                    curlSession.setopt(pycurl.COOKIE, headers[key])
                elif lKey == 'referer':
                    curlSession.setopt(pycurl.REFERER, headers[key])
                else:
                    customHeaders.append('%s: %s' % (key, headers[key]))
            if len(customHeaders):
                curlSession.setopt(pycurl.HTTPHEADER, customHeaders)

            curlSession.setopt(pycurl.ACCEPT_ENCODING, "")  # enable all supported built-in compressions
            if None is not params.get('ssl_protocol', None):
                sslProtoVer = self.getPyCurlSSLProtocolVersion(params['ssl_protocol'])
                if None is not sslProtoVer:
                    curlSession.setopt(pycurl.SSLVERSION, sslProtoVer)

            if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
                params['use_cookie'] = True

            if params.get('use_cookie', False):
                cookiesStr = ''
                for cookieKey in list(params.get('cookie_items', {}).keys()):
                    printDBG("cookie_item[%s=%s]" % (cookieKey, params['cookie_items'][cookieKey]))
                    cookiesStr += '%s=%s; ' % (cookieKey, params['cookie_items'][cookieKey])

                if cookiesStr != '':
                    curlSession.setopt(pycurl.COOKIE, cookiesStr)  # 'Set-Cookie: foo=baar') #

                if params.get('load_cookie', False):
                    curlSession.setopt(pycurl.COOKIEFILE, params.get('cookiefile', ''))

                if params.get('save_cookie', False):
                    curlSession.setopt(pycurl.COOKIEJAR, params.get('cookiefile', ''))

            if timeout is not None:
                curlSession.setopt(pycurl.CONNECTTIMEOUT, timeout)  # in seconds - connection timeout
                curlSession.setopt(pycurl.LOW_SPEED_TIME, timeout)  # in seconds
                curlSession.setopt(pycurl.LOW_SPEED_LIMIT, 10)  # in bytes
                # set maximum time the request is allowed to take
                # curlSession.setopt(pycurl.TIMEOUT, 300) # in seconds

            if not params.get('no_redirection', False):
                curlSession.setopt(pycurl.FOLLOWLOCATION, 1)
                curlSession.setopt(pycurl.UNRESTRICTED_AUTH, 1)
                curlSession.setopt(pycurl.MAXREDIRS, 5)

            # debug
            # curlSession.setopt(pycurl.VERBOSE, 1)
            # curlSession.setopt(pycurl.DEBUGFUNCTION, debug_fun)

            if not IsHttpsCertValidationEnabled():
                curlSession.setopt(pycurl.SSL_VERIFYHOST, 0)
                curlSession.setopt(pycurl.SSL_VERIFYPEER, 0)
                # curlSession.setopt(pycurl.PROXY_SSL_VERIFYHOST, 0)
                curlSession.setopt(pycurl.PROXY_SSL_VERIFYPEER, 0)
            else:
                curlSession.setopt(pycurl.CAINFO, "/etc/ssl/certs/ca-certificates.crt")
                curlSession.setopt(pycurl.PROXY_CAINFO, "/etc/ssl/certs/ca-certificates.crt")

            # proxy support
            if self.useProxy:
                http_proxy = self.proxyURL
            else:
                http_proxy = ''
            # proxy from parameters (if available) overwrite default one
            if 'http_proxy' in params:
                http_proxy = params['http_proxy']
            if '' != http_proxy:
                printDBG('getPageWithPyCurl USE PROXY')
                curlSession.setopt(pycurl.PROXY, http_proxy)

            pageUrl = url
            proxy_gateway = params.get('proxy_gateway', '')
            if proxy_gateway != '':
                pageUrl = proxy_gateway.format(quote_plus(pageUrl, ''))
            printDBG("pageUrl: [%s]" % pageUrl)

            curlSession.setopt(pycurl.URL, pageUrl)

            if None is not post_data:
                printDBG('pCommon - getPageWithPyCurl() -> post data: ' + str(post_data))
                if params.get('raw_post_data', False):
                    curlSession.setopt(pycurl.POSTFIELDS, post_data)
                elif params.get('multipart_post_data', False):
                    printDBG("multipart_post_data NOT SUPPORTED")
                    dataPost = post_data
                    curlSession.setopt(pycurl.HTTPPOST, post_data)
                    # curlSession.setopt(pycurl.CUSTOMREQUEST, "PUT")
                else:
                    curlSession.setopt(pycurl.POSTFIELDS, urlencode(post_data))

            curlSession.setopt(pycurl.HEADERFUNCTION, _headerFunction)

            if fileHandler:
                printDBG('pCommon - getPageWithPyCurl() -> fileHandler exists, pycurl.WRITEFUNCTION = _bodyFunction')
                curlSession.setopt(pycurl.WRITEFUNCTION, _bodyFunction)
            elif maxDataSize >= 0:
                printDBG('pCommon - getPageWithPyCurl() -> fileHandler exists, pycurl.WRITEFUNCTION = _breakConnection')
                curlSession.setopt(pycurl.WRITEFUNCTION, _breakConnection)
            else:
                printDBG('pCommon - getPageWithPyCurl() -> pycurl.WRITEDATA to CurrBuffer')
                curlSession.setopt(pycurl.WRITEDATA, CurrBuffer)

            curlSession.setopt(pycurl.NOPROGRESS, False)
            curlSession.setopt(pycurl.PROGRESSFUNCTION, _terminateFunction)
            curlSession.setopt(pycurl.NOSIGNAL, 1)
            # if 0 == maxDataSize:
            #    curlSession.setopt(pycurl.NOBODY, True);

            if not IsThreadTerminated():
                if maxDataSize >= 0:
                    try:
                        curlSession.perform()
                    except pycurl.error as e:
                        if e[0] != pycurl.E_WRITE_ERROR:
                            raise e
                        else:
                            printExc()
                else:
                    curlSession.perform()

                metadata['url'] = curlSession.getinfo(pycurl.EFFECTIVE_URL)
                metadata['status_code'] = curlSession.getinfo(pycurl.HTTP_CODE)
                metadata['size_download'] = curlSession.getinfo(pycurl.SIZE_DOWNLOAD)

                # reset will cause lost all cookies, so we force to saved them in the file
                if params.get('use_cookie', False) and params.get('save_cookie', False):
                    curlSession.setopt(pycurl.COOKIELIST, 'FLUSH')
                    curlSession.setopt(pycurl.COOKIELIST, 'ALL')

                curlSession.reset()
                # to be re-used in next request
                self.curlSession = curlSession

                # we should not use pycurl anymore
                SetThreadKillable(True)

                self.fillHeaderItems(metadata, responseHeaders, collectAllHeaders=params.get('collect_all_headers'))

                if params['return_data']:
                    out_data = ensure_str(CurrBuffer.getvalue())
                else:
                    out_data = ""

                out_data, metadata = self.handleCharset(params, out_data, metadata)
                if metadata['status_code'] != 200:
                    ignoreCodeRanges = params.get('ignore_http_code_ranges', [(404, 404), (500, 500)])
                    for ignoreCodeRange in ignoreCodeRanges:
                        if metadata['status_code'] >= ignoreCodeRange[0] and metadata['status_code'] <= ignoreCodeRange[1]:
                            sts = True
                            break
                else:
                    sts = True

            if fileHandler:
                fileHandler.close()

            if metadata['status_code'] == 200 and metadata['content-type'] == 'image/webp':
                new_name = params['save_to_file'].replace(".webp", ".jpg")
                printDBG("Change extension of webp image: %s" % new_name)
                try:
                    move(params['save_to_file'], new_name)
                    self.convertWebp(new_name)
                except:
                    pass

        except pycurl.error as e:
            try:
                metadata['pycurl_error'] = (e[0], str(e[1]))
            except Exception:
                metadata['pycurl_error'] = (e.args[0], e.args[1])  # it seems pycurl in p3 has different structure
            printExc()
        except Exception:
            printExc()

        SetThreadKillable(True)

        printDBG('pCommon - getPageWithPyCurl() return -> \nsts: %s\nmetadata: %s\n' % (sts, metadata))
        if params.get('with_metadata', False):
            out_data = strwithmeta(out_data, metadata)

        return sts, out_data

    def convertWebp(self, file_path, png=False):
        """Convert a webp image at *file_path* in place to jpeg (or to png when
        *png* is True), keeping the original file name.

        Uses PIL when available, otherwise falls back to the external
        ffmpeg binary (started asynchronously via iptv_system).
        """
        printDBG("PCommon.convertWebp %s" % file_path)

        # BUG FIX: the original expression `file_path + ".png" if png else ".jpg"`
        # parsed as `(file_path + ".png") if png else ".jpg"`, so with the
        # default png=False output_path was the bare string ".jpg".
        output_path = file_path + (".png" if png else ".jpg")
        if os.path.exists(file_path):
            if os.path.exists(output_path):
                # remove a stale conversion artifact from a previous run
                os.remove(output_path)
            if hasPIL:
                try:
                    img = Image.open(file_path)
                    img.save(output_path, format="png" if png else "jpeg", quality=80)
                    img.close()
                    # replace the original webp file with the converted image
                    os.remove(file_path)
                    move(output_path, file_path)
                    return
                except Exception:
                    printExc()
                    return

            if IsExecutable('ffmpeg'):
                # convert with ffmpeg and replace the original only when the
                # output file was actually created
                command = "ffmpeg -i %s %s && test -e %s && rm %s && mv %s %s " % (file_path, output_path, output_path, file_path, output_path, file_path)

                printDBG("Send command %s" % command)
                self.cmd = iptv_system(command)
        else:
            printDBG("PCommon.convertWebp file not exists %s" % file_path)

    def getPageWithPyCurl(self, url, params={}, post_data=None):
        """Fetch *url* through the pycurl backend, with up to 3 attempts.

        Some failures are caused by reusing a cached curl session; an SSL
        session error on a reused session triggers one retry with a fresh
        session. A TLS negotiation failure (-313) triggers one retry with the
        protocol forced to TLSv1_2. Remaining SSL failures are reported to
        the user via reportHttpsError().
        """
        # True when a curl session cached from a previous request will be reused
        sessionReused = self.curlSession is not None

        sts, data = False, None
        try:
            for _attempt in range(3):
                sts, data = self._getPageWithPyCurl(url, params, post_data)
                pycurlError = self.meta.get('pycurl_error') if not sts else None
                if pycurlError is not None and pycurlError[0] == pycurl.E_SSL_CONNECT_ERROR:
                    errMsg = pycurlError[1]
                    if 'SSL_set_session failed' in errMsg or '-308' in errMsg:
                        printDBG("pCommon - getPageWithPyCurl() - retry with fresh session")
                        if sessionReused:
                            sessionReused = False
                            continue
                    elif '-313' in errMsg and 'ssl_protocol' not in params:
                        # retry once with the TLS protocol pinned to 1.2
                        params = dict(params)
                        params['ssl_protocol'] = 'TLSv1_2'
                        continue
                break

            if not sts and 'pycurl_error' in self.meta:
                errCode, errMsg = self.meta['pycurl_error']
                if errCode == pycurl.E_SSL_CONNECT_ERROR:
                    self.reportHttpsError('other', url, errMsg)
                elif errCode in (pycurl.E_SSL_CACERT, pycurl.E_SSL_ISSUER_ERROR,
                                 pycurl.E_SSL_PEER_CERTIFICATE, pycurl.E_SSL_CACERT_BADFILE,
                                 pycurl.E_SSL_INVALIDCERTSTATUS):
                    self.reportHttpsError('verify', url, errMsg)
        except Exception:
            printExc()
        return sts, data

    def fillHeaderItems(self, metadata, responseHeaders, camelCase=False, collectAllHeaders=False):
        """Copy selected (or all) response headers into *metadata*.

        :param metadata: dict updated in place; keys are stored lower-case.
        :param responseHeaders: mapping with the response headers (a plain
            dict or an email.message-like object supporting `in`/`[]`/`items()`).
        :param camelCase: when True, look the standard headers up under their
            Camel-Case names; otherwise under the lower-case names.
        :param collectAllHeaders: when True, additionally copy EVERY response
            header into metadata under its lower-cased name.
        """
        returnKeys = ['content-type', 'content-disposition', 'content-length', 'location', 'last-modified']
        if camelCase:
            sourceKeys = ['Content-Type', 'Content-Disposition', 'Content-Length', 'Location', 'Last-Modified']
        else:
            sourceKeys = returnKeys
        for retKey, srcKey in zip(returnKeys, sourceKeys):
            if srcKey in responseHeaders:
                metadata[retKey] = responseHeaders[srcKey]

        if collectAllHeaders:
            # NOTE: the previous implementation first copied the headers listed
            # in Access-Control-Allow-Headers and then copied ALL headers anyway,
            # overwriting that first pass - the redundant pass was removed.
            for header, _value in responseHeaders.items():
                # index by name (not _value) so that message-like containers
                # with duplicate headers keep the original first-value semantics
                metadata[header.lower()] = responseHeaders[header]

    def getPage(self, url, addParams={}, post_data=None):
        ''' wraps getURLRequestData

        Returns a (status, response) tuple:
          - on success: (True, page body as str) when return_data is set,
            or (True, open response handler) when return_data is False;
          - on failure: (False, error object / message / None).
        '''
        # BUG FIX: operate on a copy - the original mutated the caller's dict
        # (and the shared mutable default argument) by inserting the
        # 'url' and 'return_data' keys.
        addParams = dict(addParams)

        # if curl should be used and can be used
        if addParams.get('return_data', True) and not addParams.get('CFProtection', False) and self.usePyCurl():
            return self.getPageWithPyCurl(url, addParams, post_data)

        try:
            addParams['url'] = url
            if 'return_data' not in addParams:
                addParams['return_data'] = True
            response = self.getURLRequestData(addParams, post_data)
            status = True
        except HTTPError as e:
            try:
                printExc()
                if e.code == 308:
                    # permanent redirect - follow the Location header manually
                    return self.getPage(e.fp.info().get('Location', ''), addParams, post_data)
                status = False
                response = e
                if e.code == 403:
                    return (status, _('Access Forbidden'))
                if addParams.get('return_data', False):
                    # build a strwithmeta response from the error body so the
                    # caller still receives data plus metadata for this error
                    self.meta = {}
                    metadata = self.meta
                    metadata['url'] = e.fp.geturl()
                    metadata['status_code'] = e.code
                    self.fillHeaderItems(metadata, e.fp.info(), True, collectAllHeaders=addParams.get('collect_all_headers'))

                    data = e.fp.read(addParams.get('max_data_size', -1))
                    if e.fp.info().get('Content-Encoding', '') == 'gzip':
                        data = DecodeGzipped(data)

                    data, metadata = self.handleCharset(addParams, data, metadata)
                    response = strwithmeta(data, metadata)
                    e.fp.close()
            except Exception:
                printExc()
        except URLError as e:
            printExc()
            errorMsg = str(e)
            # server rejected the TLS protocol - retry once with TLSv1_2 forced
            if 'ssl_protocol' not in addParams and 'TLSV1_ALERT_PROTOCOL_VERSION' in errorMsg:
                try:
                    newParams = dict(addParams)
                    newParams['ssl_protocol'] = 'TLSv1_2'
                    return self.getPage(url, newParams, post_data)
                except Exception:
                    pass
            if 'VERSION' in errorMsg:
                self.reportHttpsError('version', url, errorMsg)
            elif 'VERIFY_FAILED' in errorMsg:
                self.reportHttpsError('verify', url, errorMsg)
            elif 'SSL' in errorMsg or 'unknown url type: https' in errorMsg:  # GET_SERVER_HELLO
                self.reportHttpsError('other', url, errorMsg)

            response = None
            status = False

        except Exception:
            printExc()
            response = None
            status = False

        # a successful request must deliver a str when return_data is set
        if addParams['return_data'] and status and not isinstance(response, str):
            status = False

        return (status, response)

    def getPageCFProtection(self, baseUrl, params={}, post_data=None):
        """Fetch a page that may be behind Cloudflare protection.

        First performs a normal getPage() call with cookies enabled and the
        CFProtection flag set. On failure, the external captcha solver is
        invoked; the returned token carries a User-Agent and (optionally) a
        cf_clearance cookie which are applied to a second getPage() attempt.

        Returns (sts, data); data is wrapped in strwithmeta carrying the
        'cf_user' User-Agent that should be reused for follow-up requests.
        NOTE(review): *params* is mutated in place (header/cookie keys) -
        callers appear to pass throw-away dicts; confirm before reusing one.
        """
        cf_user = params.get('header', {}).get('User-Agent', '')
        header = {'Referer': baseUrl, 'User-Agent': cf_user, 'Accept-Encoding': 'text'}
        # caller-supplied headers win over the defaults built above
        header.update(params.get('header', {}))
        params.update({'with_metadata': True, 'use_cookie': True, 'save_cookie': True, 'load_cookie': True, 'cookiefile': params.get('cookiefile', ''), 'header': header})
        params.update({'CFProtection': True})
        start_time = time.time()
        sts, data = self.getPage(baseUrl, params, post_data)

        if not sts and data is not None:
            from Plugins.Extensions.IPTVPlayer.libs.recaptcha_mye2i import UnCaptchaReCaptcha
            recaptcha = UnCaptchaReCaptcha(lang=GetDefaultLang())
            token = recaptcha.processCaptcha(start_time, baseUrl, captchaType='CF')
            if token != '':
                # the token is a base64-encoded JSON object
                r = json_loads(base64.b64decode(token))
                printDBG('>>>>>>>>>>>>>>>>>>>>> CF token >>>>>>>>>>>>>>>>>>>>>>')
                printDBG(r)
                printDBG('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                if cf_user != r.get('user_agent', ''):
                    # persist the solver's User-Agent in the plugin config so
                    # the cf_clearance cookie stays valid for later sessions
                    cf_user = r.get('user_agent', '')
                    config.plugins.iptvplayer.cloudflare_user = ConfigText(default="", fixed_size=False)
                    config.plugins.iptvplayer.cloudflare_user.value = cf_user
                    config.plugins.iptvplayer.cloudflare_user.save()
                    configfile.save()
                params['header']['User-Agent'] = cf_user

                cookies = r.get('cookie', [])
                if len(cookies) > 0:
                    try:
                        params['cookie_items'] = {'cf_clearance': cookies[0]['value']}
                    except Exception:
                        try:
                            params['cookie_items'] = {'cf_clearance': cookies['value']}  # mye2i < 1.7
                        except Exception:
                            printDBG('missing cf_clearance value in received token')
                # second attempt, now with the solver's UA and clearance cookie
                sts, data = self.getPage(baseUrl, params, post_data)
                if not sts:
                    printDBG('>>>>>>>>>>>>>>>> not sts returned data >>>>>>>>>>>>>>')
                    printDBG(data)
                    printDBG('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

        data = strwithmeta(data, {'cf_user': cf_user})

        return sts, data

    def saveWebFileWithPyCurl(self, file_path, url, add_params={}, post_data=None):
        """Download *url* straight to *file_path* using the pycurl backend.

        Returns {'sts': bool, 'fsize': number of downloaded bytes}.
        On failure any partially written file is removed.
        """
        # BUG FIX: work on a copy - the original wrote into (and pop()ed keys
        # out of) the caller's dict and the shared mutable default argument.
        add_params = dict(add_params)
        downDataSize = 0

        add_params['with_metadata'] = True
        add_params['save_to_file'] = file_path
        # translate the public 'maintype'/'subtypes' option names to the
        # 'check_*' names understood by the pycurl backend
        if 'maintype' in add_params:
            add_params['check_maintype'] = add_params.pop('maintype')
        if 'subtypes' in add_params:
            add_params['check_subtypes'] = add_params.pop('subtypes')

        sts, data = self.getPageWithPyCurl(url, add_params, post_data)
        if sts:
            downDataSize = data.meta['size_download']
        else:
            # drop a possibly partially written file
            rm(file_path)
        return {'sts': sts, 'fsize': downDataSize}

    def saveWebFile(self, file_path, url, addParams={}, post_data=None):
        """Download *url* to *file_path*, optionally validating the MIME type
        and/or the first bytes of the payload.

        Returns a dict {'sts': bool, 'fsize': downloaded size in bytes}.
        Recognized addParams keys include: 'maintype', 'subtypes',
        'check_first_bytes', 'block_size', 'ignore_content_length',
        'webp_convert_to_png' (plus anything accepted by getPage()).
        """
        addParams = dict(addParams)

        # merge url-embedded parameters (strwithmeta) into the request params
        outParams, postData = self.getParamsFromUrlWithMeta(url)
        addParams.update(outParams)
        if 'header' not in addParams and 'host' not in addParams:
            host = self.HOST
            header = {'User-Agent': host, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
            addParams['header'] = header
        # request an open response handler instead of the page body
        addParams['return_data'] = False

        # if curl should and can be used
        if self.usePyCurl():
            return self.saveWebFileWithPyCurl(file_path, url, addParams, post_data)

        bRet = False
        downDataSize = 0
        dictRet = {}
        try:
            # with return_data == False, getPage() hands back the response object
            sts, downHandler = self.getPage(url, addParams, post_data)

            if addParams.get('ignore_content_length', False):
                meta = downHandler.info()
                contentLength = int(meta.getheaders("Content-Length")[0])
            else:
                contentLength = None

            checkFromFirstBytes = addParams.get('check_first_bytes', [])
            OK = True
            if 'maintype' in addParams and addParams['maintype'] != downHandler.headers.maintype:
                printDBG("common.getFile wrong maintype! requested[%r], retrieved[%r]" % (addParams['maintype'], downHandler.headers.maintype))
                # keep the handler open when first-bytes checking can still
                # accept the payload despite the wrong MIME type
                if 0 == len(checkFromFirstBytes):
                    downHandler.close()
                OK = False

            if OK and 'subtypes' in addParams:
                OK = False
                for item in addParams['subtypes']:
                    if item == downHandler.headers.subtype:
                        OK = True
                        break

            if OK or len(checkFromFirstBytes):
                blockSize = addParams.get('block_size', 8192)
                fileHandler = None
                while True:
                    CurrBuffer = downHandler.read(blockSize)

                    # validate the first chunk against the accepted signatures,
                    # then disable the check for the rest of the download
                    if len(checkFromFirstBytes):
                        OK = False
                        for item in checkFromFirstBytes:
                            if CurrBuffer.startswith(ensure_binary(item)):
                                OK = True
                                break
                        if not OK:
                            break
                        else:
                            checkFromFirstBytes = []

                    if not CurrBuffer:
                        break
                    downDataSize += len(CurrBuffer)
                    if len(CurrBuffer):
                        # open the output file lazily - only once there is data
                        if fileHandler is None:
                            fileHandler = open(file_path, "wb")
                        fileHandler.write(CurrBuffer)
                if fileHandler is not None:
                    fileHandler.close()
                downHandler.close()
                # success means either an exact Content-Length match (when it
                # was requested) or simply that some data was written
                if None is not contentLength:
                    if contentLength == downDataSize:
                        bRet = True
                elif downDataSize > 0:
                    bRet = True

                # decode webp to jpeg
                if url.endswith(".webp"):
                    if addParams.get('webp_convert_to_png', False):
                        self.convertWebp(file_path, png=True)
                    else:
                        self.convertWebp(file_path)

        except Exception:
            printExc("common.getFile download file exception")
        dictRet.update({'sts': bRet, 'fsize': downDataSize})
        return dictRet

    def getUrllibSSLProtocolVersion(self, protocolName):
        """Translate a protocol name ('TLSv1_2' / 'TLSv1_1') into the matching
        ssl module constant.

        Unknown names yield None; non-string input is reported as an error
        and returned unchanged.
        """
        if not isinstance(protocolName, str):
            GetIPTVNotify().push('getUrllibSSLProtocolVersion error. Please report this problem to iptvplayere2@gmail.com', 'error', 40)
            return protocolName
        protocolMap = {'TLSv1_2': ssl.PROTOCOL_TLSv1_2, 'TLSv1_1': ssl.PROTOCOL_TLSv1_1}
        return protocolMap.get(protocolName)

    def getPyCurlSSLProtocolVersion(self, protocolName):
        """Translate a protocol name ('TLSv1_2' / 'TLSv1_1') into the matching
        pycurl SSLVERSION constant.

        Unknown names yield None; non-string input is reported as an error
        and returned unchanged.
        """
        if not isinstance(protocolName, str):
            GetIPTVNotify().push('getPyCurlSSLProtocolVersion error. Please report this problem to iptvplayere2@gmail.com', 'error', 40)
            return protocolName
        protocolMap = {'TLSv1_2': pycurl.SSLVERSION_TLSv1_2, 'TLSv1_1': pycurl.SSLVERSION_TLSv1_1}
        return protocolMap.get(protocolName)

    def getURLRequestData(self, params={}, post_data=None):
        """Perform an HTTP(S) request with urllib, honoring the rich option
        dict used throughout this module.

        Recognized params keys include: 'url' (required), 'header', 'host',
        'timeout', 'use_cookie'/'load_cookie'/'save_cookie'/'cookiefile'/
        'cookie_items', 'no_redirection', 'ssl_protocol', 'http_proxy',
        'proxy_gateway', 'raw_post_data', 'multipart_post_data',
        'return_data', 'max_data_size', 'ignore_http_code_ranges',
        'with_metadata', 'collect_all_headers'.

        Returns either the (optionally charset-converted, optionally
        strwithmeta-wrapped) page body when 'return_data' is set, or the open
        response object otherwise. Raises on unhandled HTTP errors; must not
        be called from the main (GUI) thread.
        """

        def urlOpen(req, customOpeners, timeout):
            # Open *req* through build_opener when extra handlers are needed,
            # or plain urlopen otherwise; timeout is applied only when given.
            # req = ensure_binary(req)
            # above line was added to resolve > "TypeError: POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str."
            # but it seems it breaks other scenarios
            # also documentation says req should be a string. :(
            # NEEDS FURTHER INVESTIGATION !!!

            if len(customOpeners) > 0:
                opener = build_opener(*customOpeners)
                if timeout is not None:
                    response = opener.open(req, timeout=timeout)
                else:
                    response = opener.open(req)
            else:
                if timeout is not None:
                    response = urlopen(req, timeout=timeout)
                else:
                    response = urlopen(req)
            return response

        # blocking I/O from the GUI thread would freeze the interface
        if IsMainThread():
            msg1 = _('It is not allowed to call getURLRequestData from main thread.')
            msg2 = _('You should never perform block I/O operations in the __init__.')
            GetIPTVNotify().push(r'\s'.join([msg1, msg2]), 'error', 40)
            raise Exception("Wrong usage!")

        if 'max_data_size' in params and not params.get('return_data', False):
            raise Exception("return_data == False is not accepted with max_data_size.\nPlease also note that return_data == False is deprecated and not supported with PyCurl HTTP backend!")

        cj = http.cookiejar.MozillaCookieJar()
        response = None
        req = None
        out_data = None
        opener = None
        # metadata collected for this request is also published via self.meta
        self.meta = {}
        metadata = self.meta

        timeout = params.get('timeout', None)

        if 'host' in params:
            host = params['host']
        else:
            host = self.HOST

        if 'header' in params:
            headers = params['header']
        elif None is not self.HEADER:
            headers = self.HEADER
        else:
            headers = {'User-Agent': host}

        if 'User-Agent' not in headers:
            headers['User-Agent'] = host

        printDBG('pCommon - getURLRequestData() -> params: ' + str(params))
        printDBG('pCommon - getURLRequestData() -> headers: ' + str(headers))

        customOpeners = []
        # cookie support
        # implicit opt-in: a cookiefile plus a load/save flag enables cookies
        if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
            params['use_cookie'] = True

        if params.get('use_cookie', False):
            if params.get('load_cookie', False):
                try:
                    cj.load(params['cookiefile'], ignore_discard=True)
                except IOError:
                    printDBG('Cookie file [%s] not exists' % params['cookiefile'])
                except Exception:
                    printExc()
            try:
                # inject explicitly requested cookie values on top of the jar
                for cookieKey in list(params.get('cookie_items', {}).keys()):
                    printDBG("cookie_item[%s=%s]" % (cookieKey, params['cookie_items'][cookieKey]))
                    cookieItem = http.cookiejar.Cookie(version=0, name=cookieKey, value=params['cookie_items'][cookieKey], port=None, port_specified=False, domain='', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                    cj.set_cookie(cookieItem)
            except Exception:
                printExc()
            customOpeners.append(HTTPCookieProcessor(cj))

        if params.get('no_redirection', False):
            customOpeners.append(NoRedirection())

        if None is not params.get('ssl_protocol', None):
            sslProtoVer = self.getUrllibSSLProtocolVersion(params['ssl_protocol'])
        else:
            sslProtoVer = None
        # debug
        # customOpeners.append(urllib2.HTTPSHandler(debuglevel=1))
        # customOpeners.append(urllib2.HTTPHandler(debuglevel=1))
        # certificate validation disabled: use an unverified SSL context
        if not IsHttpsCertValidationEnabled():
            try:
                if sslProtoVer is not None:
                    ctx = ssl._create_unverified_context(sslProtoVer)
                else:
                    ctx = ssl._create_unverified_context()
                customOpeners.append(HTTPSHandler(context=ctx))
            except Exception:
                pass
        elif sslProtoVer is not None:
            ctx = ssl.SSLContext(sslProtoVer)
            customOpeners.append(HTTPSHandler(context=ctx))

        # proxy support
        if self.useProxy:
            http_proxy = self.proxyURL
        else:
            http_proxy = ''
        # proxy from parameters (if available) overwrite default one
        if 'http_proxy' in params:
            http_proxy = params['http_proxy']
        if '' != http_proxy:
            printDBG('getURLRequestData USE PROXY')
            customOpeners.append(ProxyHandler({"http": http_proxy}))
            customOpeners.append(ProxyHandler({"https": http_proxy}))

        pageUrl = params['url']
        proxy_gateway = params.get('proxy_gateway', '')
        if proxy_gateway != '':
            # route the (quoted) target url through the gateway url template
            pageUrl = proxy_gateway.format(quote_plus(pageUrl, ''))
        printDBG("pageUrl: [%s]" % pageUrl)
        if '","' in pageUrl:  # points incorrectly formatted dict or list
            pageUrl = pageUrl.split('"', 1)[0]  # " is incorrect char for url, shouldn't be there so removing it and everything after it
            printDBG("CORRECTED pageUrl: [%s]" % pageUrl)

        if None is not post_data:
            printDBG('pCommon - getURLRequestData() -> post data: ' + str(post_data))
            if params.get('raw_post_data', False):
                dataPost = post_data
            elif params.get('multipart_post_data', False):
                customOpeners.append(MultipartPostHandler())
                dataPost = post_data
            else:
                dataPost = urlencode(post_data)
            dataPost = ensure_binary(dataPost)
            req = Request(pageUrl, dataPost, headers)
        else:
            req = Request(pageUrl, None, headers)

        if not params.get('return_data', False):
            # caller wants the open response handler, not the body
            out_data = urlOpen(req, customOpeners, timeout)
        else:
            gzip_encoding = False
            try:
                response = urlOpen(req, customOpeners, timeout)
                if response.info().get('Content-Encoding') == 'gzip':
                    gzip_encoding = True
                try:
                    metadata['url'] = response.geturl()
                    metadata['status_code'] = response.getcode()
                    self.fillHeaderItems(metadata, response.info(), True, collectAllHeaders=params.get('collect_all_headers'))
                except Exception:
                    pass

                max = params.get('max_data_size', -1)
                if max == -1:
                    data = response.read()
                else:
                    data = response.read(max)
                response.close()
            except HTTPError as e:
                # some status codes (404/500 by default) are treated as valid
                # responses and their body is returned to the caller
                ignoreCodeRanges = params.get('ignore_http_code_ranges', [(404, 404), (500, 500)])
                ignoreCode = False
                metadata['status_code'] = e.code
                for ignoreCodeRange in ignoreCodeRanges:
                    if e.code >= ignoreCodeRange[0] and e.code <= ignoreCodeRange[1]:
                        ignoreCode = True
                        break

                if ignoreCode:
                    printDBG('!!!!!!!! %s: getURLRequestData - handled' % e.code)
                    if e.fp.info().get('Content-Encoding', '') == 'gzip':
                        gzip_encoding = True
                    try:
                        metadata['url'] = e.fp.geturl()
                        self.fillHeaderItems(metadata, e.fp.info(), True, collectAllHeaders=params.get('collect_all_headers'))
                    except Exception:
                        pass
                    max = params.get('max_data_size', -1)
                    if max == -1:
                        data = e.fp.read()
                    else:
                        data = e.fp.read(max)
                    # e.msg
                    # e.headers
                elif e.code == 503:
                    # save the cookies received with the 503 page (anti-bot
                    # challenges often set them) before propagating the error
                    if params.get('use_cookie', False):
                        new_cookie = e.fp.info().get('Set-Cookie', '')
                        printDBG("> new_cookie[%s]" % new_cookie)
                        cj.save(params['cookiefile'], ignore_discard=True)
                    raise e
                else:
                    # persist cookies set by a redirect response before re-raising
                    if e.code in [300, 302, 303, 307] and params.get('use_cookie', False) and params.get('save_cookie', False):
                        new_cookie = e.fp.info().get('Set-Cookie', '')
                        printDBG("> new_cookie[%s]" % new_cookie)
                        # for cookieKey in params.get('cookie_items', {}).keys():
                        #    cj.clear('', '/', cookieKey)
                        cj.save(params['cookiefile'], ignore_discard=True)
                    raise e
            try:
                if gzip_encoding:
                    printDBG('Content-Encoding == gzip')
                    out_data = DecodeGzipped(data)
                else:
                    out_data = data
            except Exception as e:
                printExc()
                if params.get('max_data_size', -1) == -1:
                    msg1 = _("Critical Error – Content-Encoding gzip cannot be handled!")
                    msg2 = _("Last error:\n%s" % str(e))
                    GetIPTVNotify().push('%s\n\n%s' % (msg1, msg2), 'error', 20)
                # fall back to the raw (still compressed) payload
                out_data = data

        if params.get('use_cookie', False) and params.get('save_cookie', False):
            try:
                cj.save(params['cookiefile'], ignore_discard=True)
            except Exception as e:
                printExc()
                raise e

        out_data, metadata = self.handleCharset(params, out_data, metadata)
        if params.get('with_metadata', False) and params.get('return_data', False):
            out_data = strwithmeta(out_data, metadata)

        return out_data

    def handleCharset(self, params, data, metadata):
        """Decode *data* to text according to the response charset.

        The charset is taken from the content-type header in *metadata*;
        optionally (params['search_charset']) it is also searched for in the
        page's <meta> tag. Non-UTF-8 charsets are recorded in
        metadata['orig_charset']. Returns the (possibly decoded) data and the
        metadata dict.
        """
        try:
            if params.get('return_data', False) and params.get('convert_charset', True):
                charset = ''
                if 'content-type' in metadata:
                    charset = self.ph.getSearchGroups(metadata['content-type'], r'''charset=([A-Za-z0-9\-]+)''', 1, True)[0].strip().upper()
                if charset == '' and params.get('search_charset', False):
                    # no charset in the headers - look for it in the html meta tag
                    metaTag = self.ph.getSearchGroups(strDecode(data, 'ignore'), '''(<meta[^>]+?Content-Type[^>]+?>)''', ignoreCase=True)[0]
                    charset = self.ph.getSearchGroups(metaTag, r'''charset=([A-Za-z0-9\-]+)''', 1, True)[0].strip().upper()
                if charset in ('', 'UTF-8'):
                    # default path: decode as utf-8, ignoring errors on failure
                    try:
                        data = strDecode(data)
                    except Exception:
                        data = strDecode(data, 'ignore')
                else:
                    printDBG(">> encoding[%s]" % charset)
                    try:
                        data = data.decode(charset)
                    except Exception:
                        printExc()
                    metadata['orig_charset'] = charset
        except Exception:
            printExc()

        return data, metadata

    def urlEncodeNonAscii(self, b):
        """Percent-encode every non-ASCII byte (0x80-0xFF) in the bytes *b*.

        BUG FIX: the original replacement lambda produced a str ('%%%02x')
        for a bytes pattern, which raises TypeError under Python 3 as soon
        as a non-ASCII byte matches; the replacement now yields bytes.
        """
        return re.sub(b'[\x80-\xFF]', lambda m: b'%%%02x' % ord(m.group(0)), b)

    def iriToUri(self, iri):
        """Convert an IRI (url with non-ASCII characters) to a plain-ASCII URI.

        The netloc component is IDNA-encoded, every other component gets its
        non-ASCII bytes percent-encoded. Any per-component failure keeps that
        component unchanged; a total failure returns *iri* as-is.
        """
        try:
            if isinstance(iri, bytes):
                iri = iri.decode('utf-8')
            components = urlparse(iri)
            converted = []
            for idx, component in enumerate(components):
                newComponent = component
                try:
                    if idx == 1:
                        # index 1 is the netloc (host) - encode it per IDNA
                        newComponent = component.encode('idna')
                    else:
                        newComponent = self.urlEncodeNonAscii(component.encode('utf-8'))
                except Exception:
                    printExc()
                converted.append(ensure_str(newComponent))
            return urlunparse(converted)
        except Exception:
            printExc()
        return iri

    def makeABCList(self, tab=['0 - 9']):
        """Return a new list with the entries of *tab* followed by the upper
        case letters A-Z.

        *tab* is copied first, so the shared default list is never modified.
        """
        return list(tab) + [chr(code) for code in range(ord('A'), ord('Z') + 1)]

    def getPageRequest(self, baseUrl, params={}, post_data=None):
        """Fetch *baseUrl* with the `requests` library, retrying transient
        server errors (500/502/503/504) up to 2 times.

        Returns (sts, data) where data is the raw response body (bytes).
        Raises when the response Content-Length exceeds
        params['max_data_size']. Header items are published via self.meta.
        """
        # work on a copy - do not mutate the caller's dict / shared default
        params = dict(params)

        self.meta = {}
        metadata = self.meta

        retries = Retry(total=2, backoff_factor=1, status_forcelist=[500, 502, 503, 504], allowed_methods=["GET", "POST"])
        adapter = HTTPAdapter(max_retries=retries)

        pageUrl = self.iriToUri(baseUrl)
        printDBG(f"pageUrl: [{pageUrl}]")

        timeout = params.get('timeout', 15)
        method = params.get('method', 'GET')
        maxDataSize = params.get('max_data_size', None)
        allow_redirects = params.get('allow_redirects', False)
        verify = params.get('verify', False)
        stream = params.get('stream', True)

        if 'host' in params:
            host = params['host']
        else:
            host = self.HOST

        if 'header' in params:
            headers = params['header']
        elif self.HEADER is not None:
            headers = self.HEADER
        else:
            headers = {'User-Agent': host}

        if 'User-Agent' not in headers:
            headers['User-Agent'] = host

        # requests rejects non-string header values
        for key, value in iterDictItems(headers):
            if isinstance(value, int):
                headers[key] = str(value)

        # implicit opt-in: a cookiefile plus a load/save flag enables cookies
        if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
            params['use_cookie'] = True

        dataPost = None
        if post_data is not None:
            printDBG(f'pCommon - getPageRequest() -> post data: {str(post_data)}')
            if params.get('raw_post_data', False) or method.upper() == 'POST':
                dataPost = post_data
            else:
                dataPost = urlencode(post_data)
            dataPost = ensure_binary(dataPost)

        sts, data = False, ""
        r = None
        cookies = None
        # BUG FIX: the session was previously closed by its `with` block right
        # after mounting the adapters and then used afterwards - keep every
        # request inside the context manager instead.
        with custom_Session() as http:
            http.mount("http://", adapter)  # NOSONAR
            http.mount("https://", adapter)

            if params.get('use_cookie', False):
                if params.get('cookie_items', {}):
                    cookies = params.get('cookie_items', {})
                    printDBG(f"cookies -> {cookies}")
                else:
                    # probe request to obtain session cookies
                    r = http.request(method, pageUrl)
                    if r.cookies:
                        cookies = r.cookies

            try:
                r = http.request(method, pageUrl, headers=headers, data=dataPost, verify=verify, stream=stream, cookies=cookies, timeout=timeout, allow_redirects=allow_redirects)
                r.raise_for_status()

                sts, data = True, r.content
            except Exception:
                printExc()

        # BUG FIX: `r` was referenced unconditionally here and raised a
        # NameError whenever the request failed before a response was bound
        # (e.g. an invalid URL); guard against the missing response instead.
        if r is not None:
            self.fillHeaderItems(metadata, r.headers, True, collectAllHeaders=params.get('collect_all_headers'))

            if maxDataSize is not None and int(r.headers.get('Content-Length', 0)) > maxDataSize:
                raise Exception(f"Data size exceeds the maximum allowed size of {maxDataSize} bytes.")

        return sts, data
