import utils from '../utils'
import {BinArray} from '../utils'
import ShpConverterText from '../shpconverter-text'

/**
 *  DBF format references:
 * http://www.dbf2002.com/dbf-file-format.html
 * http://www.digitalpreservation.gov/formats/fdd/fdd000325.shtml
 * http://www.clicketyclick.dk/databases/xbase/format/index.html
 * http://www.clicketyclick.dk/databases/xbase/format/data_types.html
 */
// Namespace object for DBF-format helpers; populated below and consumed by DbfReader.
const Dbf = {}
// source: http://webhelp.esri.com/arcpad/8.0/referenceguide/index.htm#locales/task_code.htm
// Flat lookup list of [languageDriverId, codePage, languageDriverId, codePage, ...]
// pairs: numeric ids at even positions, code page strings immediately after
// (consumed by Dbf.lookupCodePage).
Dbf.languageIds = [0x01,'437',0x02,'850',0x03,'1252',0x08,'865',0x09,'437',0x0A,'850',0x0B,'437',0x0D,'437',0x0E,'850',0x0F,'437',0x10,'850',0x11,'437',0x12,'850',0x13,'932',0x14,'850',0x15,'437',0x16,'850',0x17,'865',0x18,'437',0x19,'437',0x1A,'850',0x1B,'437',0x1C,'863',0x1D,'850',0x1F,'852',0x22,'852',0x23,'852',0x24,'860',0x25,'850',0x26,'866',0x37,'850',0x40,'852',0x4D,'936',0x4E,'949',0x4F,'950',0x50,'874',0x57,'1252',0x58,'1252',0x59,'1252',0x64,'852',0x65,'866',0x66,'865',0x67,'861',0x6A,'737',0x6B,'857',0x6C,'863',0x78,'950',0x79,'949',0x7A,'936',0x7B,'932',0x7C,'874',0x86,'737',0x87,'852',0x88,'857',0xC8,'1250',0xC9,'1251',0xCA,'1254',0xCB,'1253',0xCC,'1257']

// Language & Language family names for some code pages
Dbf.encodingNames = {
  '932': "Japanese",
  '936': "Simplified Chinese",
  '950': "Traditional Chinese",
  '1252': "Western European",
  '949': "Korean",
  '874': "Thai",
  '1250': "Eastern European",
  '1251': "Russian",
  '1254': "Turkish",
  '1253': "Greek",
  '1257': "Baltic"
}

// Appended to error messages when text decoding fails or is uncertain.
Dbf.ENCODING_PROMPT =
  "To avoid corrupted text, re-import using the \"encoding=\" option.\n" +
  "To see a list of supported encodings, run the \"encodings\" command."

/**
 * Map a DBF "language driver id" (ldid byte of the header) to a code page name.
 * Dbf.languageIds is a flat [id, codePage, id, codePage, ...] list, so the code
 * page string sits immediately after its numeric id.
 * @param {number} lid - language driver id from the DBF header
 * @returns {string|null} code page identifier, or null if unknown
 */
Dbf.lookupCodePage = function(lid) {
  // ids are numbers and code pages are strings, so indexOf(lid) can only
  // match an id entry, never a code page value
  const i = Dbf.languageIds.indexOf(lid)
  return i === -1 ? null : Dbf.languageIds[i + 1]
}

/**
 * Read a fixed-size text field, requiring 7-bit ascii content.
 * Reports an error via utils.error() when a non-ascii byte is found
 * (readCString returns null in that case); otherwise returns the trimmed text.
 */
Dbf.readAsciiString = function(bin, size) {
  // second argument asks readCString to reject bytes outside the 7-bit range
  const text = bin.readCString(size, true)
  if (text === null) {
    utils.error(`DBF file contains non-ascii text.\n ${Dbf.ENCODING_PROMPT}`)
  }
  return utils.trim(text)
}

/**
 * Copy up to `size` bytes of a string field into `buf`, stopping early at the
 * first NUL byte and stripping leading and trailing space bytes (0x20).
 * @returns {number} the count of bytes written to buf
 */
Dbf.readStringBytes = function(bin, size, buf) {
  let len = 0
  for (let i = 0; i < size; i += 1) {
    const byte = bin.readUint8()
    if (byte === 0) break // NUL terminates the field early
    // skip leading spaces; copy everything after the first non-space byte
    if (len > 0 || byte !== 32) {
      buf[len] = byte
      len += 1
    }
  }
  // drop trailing spaces
  while (len > 0 && buf[len - 1] === 32) {
    len -= 1
  }
  return len
}

/**
 * Build a reader function (bin, size) -> string for 'C' (character) fields.
 * In Node, ascii/utf8 content uses Buffer#toString directly; any other
 * encoding decodes bytewise, deferring to ShpConverterText only when a byte
 * outside the 7-bit ascii range appears.
 */
Dbf.getStringReader = function(arg) {
  const encoding = arg || 'ascii'
  const slug = ShpConverterText.standardizeEncodingName(encoding)
  const buf = utils.createBuffer(256) // shared scratch buffer, reused per field
  const inNode = typeof module === 'object'

  // fast path: Buffer decodes ascii/utf8 natively
  if (inNode && (slug === 'ascii' || slug === 'utf8')) {
    return function readNativeString(bin, size) {
      const len = Dbf.readStringBytes(bin, size, buf)
      return buf.toString(slug, 0, len)
    }
  }

  return function readEncodedString(bin, size) {
    const len = Dbf.readStringBytes(bin, size, buf)
    let out = ''
    for (let i = 0; i < len; i += 1) {
      const code = buf[i]
      if (code > 127) {
        // non-ascii byte found: decode the whole field with the heavier
        // converter (pass the original encoding name, not the slug)
        return ShpConverterText.bufferToString(buf, encoding, 0, len)
      }
      out += String.fromCharCode(code)
    }
    return out
  }
}

/**
 * True if any of the first n bytes of buf has its high bit set,
 * i.e. falls outside the 7-bit ascii range.
 */
Dbf.bufferContainsHighBit = function(buf, n) {
  let i = 0
  while (i < n) {
    if (buf[i] > 127) return true
    i += 1
  }
  return false
}

/**
 * Build a reader for numeric ('N'/'F') fields: parses the field's ascii text
 * as a float, tolerating a comma used as the decimal separator.
 * @returns {function} (bin, size) -> number|null (null when unparseable)
 */
Dbf.getNumberReader = function() {
  const readString = Dbf.getStringReader('ascii')
  return function readNumber(bin, size) {
    let text = readString(bin, size)
    if (text.indexOf(',') >= 0) {
      text = text.replace(',', '.') // handle comma decimal separator
    }
    const num = parseFloat(text)
    return isNaN(num) ? null : num
  }
}

// Read a 4-byte little-endian integer ('I' field type). The size argument is
// accepted for signature compatibility with the other readers but ignored.
Dbf.readInt = function(bin, size) {
  return bin.readInt32()
}

/**
 * Read a logical ('L') field: t/T/y/Y -> true, f/F/n/N -> false,
 * anything else (e.g. '?' or blank for uninitialized) -> null.
 */
Dbf.readBool = function(bin, size) {
  const chars = bin.readCString(size)
  if (/[ty]/i.test(chars)) return true
  if (/[fn]/i.test(chars)) return false
  return null
}

/**
 * Read a date ('D') field stored as an 8-character 'YYYYMMDD' string.
 * Returns a Date at UTC midnight. A blank or malformed field yields an
 * Invalid Date (unchanged behavior) -- callers should check with isNaN().
 */
Dbf.readDate = function(bin, size) {
  // use slice() instead of the deprecated String.prototype.substr()
  const str = bin.readCString(size)
  const yr = str.slice(0, 4)
  const mo = str.slice(4, 6)
  const day = str.slice(6, 8)
  // JS Date months are 0-indexed, hence the -1
  return new Date(Date.UTC(+yr, +mo - 1, +day))
}
/**
 * Reader for the records of a dBASE (.dbf) attribute table.
 * Parses the header eagerly; record data is read lazily via readRow()/readRows().
 * cf. http://code.google.com/p/stringencoding/
 * @param {Buffer|ArrayBuffer} src - DBF file contents (a filename string is rejected)
 * @param {string} [encodingArg] - text encoding for 'C' fields; auto-detected when omitted
 */
function DbfReader(src, encodingArg) {
  if (utils.isString(src)) {
    utils.error(`[DbfReader] Expected a buffer, not a string`)
  }
  const bin = new BinArray(src)
  const header = readHeader(bin)
  // null means "not determined yet" -- resolved lazily by getEncoding()
  let encoding = encodingArg || null

  // Number of records, as declared by the file header.
  this.size = function() {
    return header.recordCount
  }
  // Read record i as a plain object keyed by field name.
  // NOTE(review): rebuilds the per-field readers and record constructor on
  // every call; prefer readRows() for bulk access.
  this.readRow = function(i) {
    return getRecordReader(header.fields)(i)
  }
  this.getFields = getFieldNames
  this.getBuffer = function() {return bin.buffer()}
  // Drop a field (matched by name) so subsequent reads skip it.
  this.deleteField = function(f) {
    header.fields = header.fields.filter((field) => {
      return field.name != f
    })
  }
  // Read every record into an array of objects.
  this.readRows = function() {
    const reader = getRecordReader(header.fields)
    const data = []
    for (let r = 0, n = this.size(); r < n; r += 1) {
      data.push(reader(r))
    }
    return data
  }
  // Parse the fixed 32-byte file header plus the field descriptor array.
  function readHeader(bin) {
    bin.position(0).littleEndian() // DBF headers are little-endian
    const header = {
      version: bin.readInt8(),
      updateYear: bin.readUint8(),
      updateMonth: bin.readUint8(),
      updateDay: bin.readUint8(),
      recordCount: bin.readUint32(),
      dataOffset: bin.readUint16(), // byte offset of the first record
      recordSize: bin.readUint16(), // bytes per record, incl. deletion flag
      incompleteTransaction: bin.skipBytes(2).readUint8(),
      encrypted: bin.readUint8(),
      mdx: bin.skipBytes(12).readUint8(),
      ldid: bin.readUint8() // language driver id (text encoding hint)
    }
    // each record starts with a 1-byte deletion flag, so column data begins at offset 1
    let colOffs = 1
    let field = null
    bin.skipBytes(2)
    header.fields = []
    // field descriptors run until the 0x0D terminator (0x0A seen in some files)
    while (bin.peek() !== 0x0D && bin.peek() !== 0x0A && bin.position() < header.dataOffset - 1) {
      field = readFieldHeader(bin)
      field.columnOffset = colOffs
      header.fields.push(field)
      colOffs += field.size
    }
    // sanity check: summed field widths must equal the declared record size
    if (colOffs !== header.recordSize) {
      utils.error(`Record length mismatch; header:${header.recordSize},detected:${colOffs}`)
    }
    if (bin.peek() !== 0x0D) {
      // missing 0x0D field-array terminator; tolerated (intentionally no action)
    }
    // de-duplicate/sanitize field names in place
    utils.getUniqFieldNames(utils.pluck(header.fields, 'name')).forEach((name2, i) => {
      header.fields[i].name = name2
    })
    return header
  }
  // Parse one 32-byte field descriptor record.
  function readFieldHeader(bin) {
    return {
      name: bin.readCString(11),
      type: String.fromCharCode(bin.readUint8()), // e.g. 'C', 'N', 'F', 'L', 'D', 'I'
      address: bin.readUint32(),
      size: bin.readUint8(),
      decimals: bin.readUint8(),
      id: bin.skipBytes(2).readUint8(),
      position: bin.skipBytes(2).readUint8(),
      indexFlag: bin.skipBytes(7).readUint8()
    }
  }
  function getFieldNames() {
    return utils.pluck(header.fields, 'name')
  }
  // Byte offset of record r within the file.
  function getRowOffset(r) {
    return header.dataOffset + header.recordSize * r
  }
  // Lazily resolve the text encoding: explicit arg > auto-detection > utf8.
  function getEncoding() {
    if (!encoding) {
      encoding = findStringEncoding()
      if (!encoding) {
        encoding = 'utf8' // last-resort fallback when detection fails
        // stop("Unable to auto-detect the text encoding of the DBF file.\n" + Dbf.ENCODING_PROMPT)
        // utils.error('Unable to auto-detect the text encoding of the DBF file.')
      }
    }
    return encoding
  }
  /**
   * Create new record objects using object literal syntax.
   * Compiles a constructor with new Function for speed; field names are
   * JSON-escaped, so odd characters in names cannot break the generated code.
   */
  function getRecordConstructor() {
    let args = getFieldNames().map((name, i) => {
          return `${JSON.stringify(name)}: arguments[${i}]`
        })
    return new Function(`return {${args.join(',')}}`);
  }
  // Position just past the last data byte; excludes the trailing 0x1A EOF
  // marker when present.
  function findEofPos(bin) {
    let pos = bin.size() - 1
    if (bin.peek(pos) != 0x1A) { // last byte may or may not be EOF
      pos++
    }
    return pos
  }
  // Build a function that reads record r into a field-name-keyed object.
  function getRecordReader(fields) {
    const readers = fields.map(getFieldReader)
    const eofOffs = findEofPos(bin)
    const create = getRecordConstructor()
    const values = [] // reused across rows to avoid per-row allocation
    return function readRow(r) {
      const offs = getRowOffset(r)
      let fieldOffs = null
      let field = null
      for (let c = 0, cols = fields.length; c < cols; c += 1) {
        field = fields[c]
        fieldOffs = offs + field.columnOffset
        // guard against truncated files: a field must not extend past EOF
        if (fieldOffs + field.size > eofOffs) {
          // stop('Invalid DBF file: encountered end-of-file while reading data')
          utils.error('Invalid DBF file: encountered end-of-file while reading data')
        }
        bin.position(fieldOffs)
        values[c] = readers[c](bin, field.size)
      }
      return create.apply(null, values)
    }
  }
  // Pick a reader function based on the field's one-character type code.
  function getFieldReader(f) {
    const type = f.type
    let r = null
    if (type == 'I') {
      r = Dbf.readInt
    } else if (type == 'F' || type == 'N') {
      r = Dbf.getNumberReader()
    } else if (type == 'L') {
      r = Dbf.readBool
    } else if (type == 'D') {
      r = Dbf.readDate
    } else if (type == 'C') {
      r = Dbf.getStringReader(getEncoding())
    } else {
      // unsupported field type -- converting to null values
      r = function() {return null}
    }
    return r
  }
  // Guess the text encoding from the header's language driver id, falling
  // back to content-based detection over sampled non-ascii strings.
  function findStringEncoding() {
    const ldid = header.ldid
    const codepage = Dbf.lookupCodePage(ldid)
    const samples = getNonAsciiSamples(50)
    const only7bit = samples.length === 0
    let encoding = null
    let msg = '' // NOTE(review): assigned but never used
    // ldid 87 (0x57) is skipped here -- presumably a common default value
    // that cannot be trusted; TODO confirm
    if (codepage && ldid !== 87) {
      encoding = codepage
    } else if (only7bit) {
      encoding = 'ascii' // no high-bit bytes anywhere, so plain ascii is safe
    }
    if (!encoding) {
      encoding = ShpConverterText.detectEncoding(samples)
    }
    if (encoding && samples.length > 0) {
      //Show a sample of decoded text if non-ascii-range text has been found
    }
    return encoding
  }
  // Collect up to `size` distinct string-field values containing non-ascii
  // bytes, for use by encoding detection.
  // NOTE(review): the inner `break` only exits the column loop, so the row
  // loop keeps scanning after the quota is reached -- confirm before changing.
  function getNonAsciiSamples(size) {
    let samples = []
    const stringFields = header.fields.filter((f) => {
      return f.type === 'C'
    })
    const buf = utils.createBuffer(256)
    const index = {} // hex-hash set used to de-duplicate samples
    let f = null
    let chars = null
    let sample = null
    let hash = null
    for (let r = 0, rows = header.recordCount; r < rows; r += 1) {
      for (let c = 0, cols = stringFields.length; c < cols; c += 1) {
        if (samples.length >= size) break
        f = stringFields[c]
        bin.position(getRowOffset(r) + f.columnOffset)
        chars = Dbf.readStringBytes(bin, f.size, buf)
        if (chars > 0 && Dbf.bufferContainsHighBit(buf, chars)) {
          sample = utils.createBuffer(buf.slice(0, chars))
          hash = sample.toString('hex')
          if (hash in index === false) {
            index[hash] = true
            samples.push(sample)
          }
        }
      }
    }
    return samples
  }
}
// Named export: the reader class. Default export: the low-level Dbf helpers.
export {
  DbfReader,
}
export default {
  Dbf,
}