import test from 'tape'
import nlp from './_lib.js'
const here = '[three/full-api] '

//run every method once, and check against runtime errors
// Smoke-test: invoke every public method of the three/full build exactly once.
// Each entry in `fns` is a zero-arg closure exercising one API method; the
// runner below asserts that none of them throws at runtime. Return values are
// intentionally discarded — this guards against crashes, not correctness.
test('constructor api', function (t) {
  const fns = {
    tokenize: () => { nlp.tokenize("you're sure you haven't just made thousands of mistakes?") },
    plugin: () => { nlp.plugin({ words: { bloobah: 'Yeah' } }) },
    verbose: () => { nlp.verbose(false); nlp('I am the very model of a modern Major-General') },
    version: () => { nlp.version },
    all: () => { nlp('this is yelling').match('#Verb').toTitleCase().all().text() },
    found: () => { nlp('oh say can you see?').match('see').found },
    wordCount: () => { nlp('this is yelling').wordCount() },
    length: () => { nlp('jackie kennedy and aristotle onassis').match('#Person+').length },
    clone: () => { nlp('would somebody please think of the children').clone().toUpperCase().all().text() },
    cache: () => { let doc = nlp("I'm looking for Amanda Hugginkiss").cache({ root: true }); doc.match('~look~') },
    uncache: () => { let doc = nlp("urine-soaked hell-hole").uncache(); doc.tag("Insult") },
    first: () => { nlp('foo').first() },
    last: () => { nlp('foo').last() },
    slice: () => { nlp('Homer, have you been eating that sandwich again?').terms().slice(0, 3).text() },
    eq: () => { nlp('foo').eq(1) },
    firstTerm: () => { nlp('foo').firstTerm() },
    lastTerm: () => { nlp('foo').lastTerm() },
    termList: () => { nlp('foo').termList() },
    match: () => { nlp('we understand, we are from the land of chocolate.').match('land of #Noun').text() },
    not: () => { nlp('wait, there’s a new mexico?').match('#Place').not('new').text() },
    matchOne: () => { nlp('foo').matchOne() },
    if: () => { nlp('We’re here, we’re clear, we don’t want anymore bears.').clauses().if('anymore').text() },
    ifNo: () => { nlp('We’re here, we’re clear, we don’t want anymore bears.').clauses().ifNo('anymore').text() },
    has: () => { nlp('I am the very model of a modern Major-General').has('#Pronoun') },
    lookBehind: () => { nlp('foo').lookBehind() },
    lookAhead: () => { nlp('foo').lookAhead() },
    before: () => { nlp('one two three four five').before('three').text() },
    after: () => { nlp('one two three four five').after('three').text() },
    lookup: () => { nlp('chocolate microscopes? double guitars?').lookup(['double guitars']).length },
    toUpperCase: () => { nlp('Dental plan. Lisa needs braces.').match('dental .').toUpperCase().text() },
    toLowerCase: () => { nlp('Careful! They’re RUFFLED!!').toLowerCase().text() },
    toTitleCase: () => { nlp('jupiter, pluto and mars').match('#Noun').toTitleCase().all().text() },
    toCamelCase: () => { nlp('natural language processing').toCamelCase().text() },
    pre: () => { nlp("we're here. we're clear. we don't want anymore bears.").pre("  ") },
    post: () => { nlp("we're here. we're clear. we don't want anymore bears.").post('!') },
    trim: () => { nlp(' Lenny and Carl ').match('#Person').trim().text() },
    hyphenate: () => { nlp('natural language processing').hyphenate().text() },
    dehyphenate: () => { nlp('natural-language processing').dehyphenate().text() },
    tag: () => { nlp('Michael Apple ate a delicious apple.').match('#FirstName apple').tag('Person').all().match('#Person+').text() },
    tagSafe: () => { nlp('foo').tagSafe() },
    unTag: () => { nlp('they made a catch & scored a run').match('(run|catch)').unTag('#Verb').all().match('#Verb').out('array') },
    canBe: () => { nlp('it’s fusilli jerry!').canBe('Person').text() },
    map: () => { nlp('yahoo serious festival').terms().map((m) => m.toUpperCase()).text() },
    forEach: () => { nlp('Oh, no! Bette Midler!').match('#Person+').forEach((m) => m.text()) },
    filter: () => { nlp('Hey, anymore arboretum’s around here?').terms().filter(m => m.has('#Plural')).length },
    find: () => { nlp('Always do the opposite of what bart says').terms().find(m => m.out('normal').match(/b[ao]rt/)).text() },
    some: () => { nlp('Don’t make me run, I’m full of chocolate!').terms().some(m => m.out('normal') === 'run') },
    random: () => { nlp('one two three four').terms().random(2).out('array') },
    replaceWith: () => { nlp('it was the worst of times').match('worst').replaceWith('blurst', true).all().text() },
    replace: () => { nlp('trust me folks, big league.').replace('big league', 'bigly').all().text() },
    delete: () => { nlp('you don’t win friends with salad').delete('do not').text() },
    append: () => { nlp('i know so many words').insertAfter('bigly').all().text() },
    prepend: () => { nlp('stupid flanders').match('flanders').insertBefore('sexy').all().text() },
    concat: () => { nlp('My name is Otto').concat('and i love to get blotto').all().length },
    sort: () => { nlp('Larry, Curly, Moe').terms().sort('alphabetical').out('array') },
    reverse: () => { nlp('foo').reverse() },
    normalize: () => { nlp(' so... you like   DONUTS? have all the donuts in the WORLD!!!').normalize().all().get(0).text() },
    unique: () => { nlp('foo').unique() },
    split: () => { nlp('Monorail...Once again! Monorail... Monorail!').splitOn('monorail').eq(0).text() },
    splitAfter: () => { nlp('Monorail...Once again! Monorail... Monorail!').splitAfter('monorail').eq(0).text() },
    splitBefore: () => { nlp('Monorail...Once again! Monorail... Monorail!').splitBefore('monorail').eq(0).text() },
    text: () => { nlp('you might say there’s a little Uter in all of us').match('#Adjective uter').out('array') },
    out: () => { nlp('foo').out() },
    json: () => { nlp('The stage was set for the Alan Parsons Project! Which I believe was some sort of hovercraft.').data() },
    terms: () => { nlp('we should all be more like little Ruttiger').terms().json() },
    clauses: () => { nlp('All right, Colossus, you’re free to go, but stay away from Death Mountain').clauses().data() },
    hyphenated: () => { nlp('foo').hyphenated() },
    phoneNumbers: () => { nlp('Moe Sizlak. That’s right. I’m a surgeon. (800) 555-0000.').phoneNumbers().json() },
    hashTags: () => { nlp('oh, but where is the #anykey').hashTags().json() },
    emails: () => { nlp('foo').emails() },
    emoticons: () => { nlp('foo').emoticons() },
    emoji: () => { nlp('foo').emoji() },
    atMentions: () => { nlp('foo').atMentions() },
    urls: () => { nlp('thank you http://simpsons.wikia.com').urls().json() },
    adverbs: () => { nlp('foo').adverbs() },
    pronouns: () => { nlp('foo').pronouns() },
    conjunctions: () => { nlp('foo').conjunctions() },
    prepositions: () => { nlp('foo').prepositions() },
    abbreviations: () => { nlp('foo').abbreviations() },
    contractions: () => { nlp('foo').contractions() },
    parentheses: () => { nlp('Use a pointed stick (a pencil) or a similar tool').parentheses().data() },
    possessives: () => { nlp('moe’s tavern').possessives().text() },
    quotations: () => { nlp('the he said "crazy like a fox!".').quotations().data().length },
    acronyms: () => { nlp('foo').acronyms() },
    nouns: () => { nlp('foo').nouns() },
    verbs: () => { nlp('Moe Sizlak. That’s right. I’m a surgeon.').verbs() },
    // debug: () => { nlp('foo').debug() },
  }
  // Call each method directly inside doesNotThrow so tape can actually
  // observe a throw. The previous version wrapped the call in an inner
  // try/catch that swallowed the exception, so doesNotThrow could never
  // fail and errors were reported via a second, poorly-formatted t.fail.
  // One assertion per method, labelled with the file prefix + method name;
  // tape attaches the thrown error to the failure automatically.
  Object.keys(fns).forEach(k => {
    t.doesNotThrow(() => fns[k](), here + k)
  })
  t.end()
})
