mirror of https://github.com/pelias/api.git
Julian Simioni
9 years ago
58 changed files with 1089 additions and 459 deletions
@@ -1,86 +0,0 @@
var parser = require('addressit');
var extend = require('extend');
var type_mapping = require('../helper/type_mapping');
var check = require('check-types');
var logger = require('pelias-logger').get('api');

var DELIM = ',';

/*
 * For performance, and to prefer POI and admin records, express a preference
 * to only search coarse layers on very short text inputs.
 */
module.exports.get_layers = function get_layers(query) {
  if (query.length <= 3) {
    // no address parsing required
    return type_mapping.layer_mapping.coarse;
  }
};

module.exports.get_parsed_address = function get_parsed_address(query) {

  var getAdminPartsBySplittingOnDelim = function(queryParts) {
    // naive approach - for admin matching during query time
    // split 'flatiron, new york, ny' into 'flatiron' and 'new york, ny'

    var address = {};

    if (queryParts.length > 1) {
      address.name = queryParts[0].trim();

      // 1. slice away all parts after the first one
      // 2. trim spaces from each part just in case
      // 3. join the parts back together with appropriate delimiter and spacing
      address.admin_parts = queryParts.slice(1)
        .map(function (part) { return part.trim(); })
        .join(DELIM + ' ');
    }

    return address;
  };

  var getAddressParts = function(query) {
    // perform full address parsing
    // except on queries so short they obviously can't contain an address
    if (query.length > 3) {
      return parser( query );
    }
  };

  var queryParts = query.split(DELIM);

  var addressWithAdminParts = getAdminPartsBySplittingOnDelim(queryParts);
  var addressWithAddressParts = getAddressParts(queryParts.join(DELIM + ' '));

  var parsedAddress = extend(addressWithAdminParts,
                             addressWithAddressParts);

  var address_parts = [ 'name',
                        'number',
                        'street',
                        'city',
                        'state',
                        'country',
                        'postalcode',
                        'regions',
                        'admin_parts'
                      ];

  var parsed_text = {};

  address_parts.forEach(function(part){
    if (parsedAddress[part]) {
      parsed_text[part] = parsedAddress[part];
    }
  });

  // if all we found was regions, ignore it as it is not enough information to make smarter decisions
  if (Object.keys(parsed_text).length === 1 && !check.undefined(parsed_text.regions))
  {
    logger.info('Ignoring address parser output, regions only');
    return null;
  }

  return parsed_text;
};
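
A minimal usage sketch (not part of the diff) of how the removed helper behaves; the require path is illustrative and the expected output shapes are hedged approximations based on the code and tests above.

// hypothetical caller of the removed helper; path is illustrative
var parser = require('../helper/text_parser');

// very short input: restrict the search to coarse (admin) layers only
var layers = parser.get_layers('de'); // roughly type_mapping.layer_mapping.coarse

// longer input: naive comma split for admin matching, plus addressit parsing
var parsed = parser.get_parsed_address('soho, new york');
// roughly { name: 'soho', admin_parts: 'new york', ... }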
@@ -0,0 +1,40 @@
var peliasQuery = require('pelias-query'),
    searchDefaults = require('../search_defaults');

/**
  This view (unfortunately) requires autocomplete to use the phrase.* index.

  ideally we wouldn't need to use this, but at time of writing we are unable
  to distinguish between 'complete tokens' and 'grams' in the name.* index.

  this view was introduced in order to score exact matches higher than partial
  matches, without it we find results such as "Clayton Avenue" appearing first
  in the results list for the query "Clay Av".

  the view uses some of the values from the 'search_defaults.js' file to add an
  additional 'SHOULD' condition which scores exact matches slightly higher
  than partial matches.
**/

module.exports = function( vs ){

  // make a copy of the variables so we don't interfere with the values
  // passed to other views.
  var vsCopy = new peliasQuery.Vars( vs.export() );

  // copy phrase:* values from search defaults
  vsCopy.var('phrase:analyzer').set(searchDefaults['phrase:analyzer']);
  vsCopy.var('phrase:field').set(searchDefaults['phrase:field']);

  // get a copy of the *complete* tokens produced from the input:name
  var tokens = vs.var('input:name:tokens_complete').get();

  // no valid tokens to use, fail now, don't render this view.
  if( !tokens || tokens.length < 1 ){ return null; }

  // set 'input:name' to be only the fully completed characters
  vsCopy.var('input:name').set( tokens.join(' ') );

  return peliasQuery.view.phrase( vsCopy );
};
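
For context, a hedged sketch of how a view like this is typically attached to a query builder via pelias-query; the layout class and method names follow the pelias-query API, and the require path is an illustrative assumption, not part of this diff.

// assumed wiring, not part of this changeset
var peliasQuery = require('pelias-query');
var exactMatchView = require('./view/phrase_first_tokens_only'); // illustrative path

var query = new peliasQuery.layout.FilteredBooleanQuery();
// the view returns null when there are no complete tokens, in which case
// the builder simply omits this extra SHOULD condition
query.score( exactMatchView );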
@@ -0,0 +1,17 @@
var peliasQuery = require('pelias-query'),
    check = require('check-types');

/**
  Population / Popularity subquery
**/

module.exports = function( vs ){

  var view = peliasQuery.view.ngrams( vs );

  view.match['name.default'].analyzer = vs.var('phrase:analyzer');
  delete view.match['name.default'].boost;

  return view;
};
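
A hedged sketch of how this subquery view is normally consumed: wrapped in one of the pelias-query function_score views so matching documents are boosted by their popularity or population field. The require path is illustrative, and the exact wrapping used by this changeset is an assumption.

// assumed usage, not part of this changeset
var peliasQuery = require('pelias-query');
var subquery = require('./view/pop_subquery'); // illustrative path

// wrap the match clause in a function_score that scales by 'popularity'
var popularityScorer = peliasQuery.view.popularity( subquery );
// ...and another that scales by 'population'
var populationScorer = peliasQuery.view.population( subquery );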
@@ -0,0 +1,112 @@
var check = require('check-types');

/**
  simplified version of the elasticsearch tokenizer, used in order to
  be able to detect which tokens are 'complete' (the user has finished typing them)
  or 'incomplete' (the user has possibly only typed part of the token).

  note: we don't need to strip punctuation as that will be handled on the
  elasticsearch side, so sending a token such as 'st.' is not an issue, these
  tokens should *not* be modified as the analysis can use the punctuation to
  infer meaning.

  note: this sanitizer should run *after* the '_text' sanitizer so it can
  use the output of clean.parsed_text where available.
**/
function sanitize( raw, clean ){

  // error & warning messages
  var messages = { errors: [], warnings: [] };

  // this is the string we will use for analysis
  var text = clean.text;

  // a boolean to track whether the input parser ran successfully, or not.
  var inputParserRanSuccessfully = false;

  // if the text parser has run then we only tokenize the 'name' section
  // of the 'parsed_text' object, ignoring the 'admin' parts.
  if( clean.hasOwnProperty('parsed_text') ) {
    inputParserRanSuccessfully = true;

    // parsed_text.name is set, this is the highest priority, use this string
    if( clean.parsed_text.hasOwnProperty('name') ){
      text = clean.parsed_text.name; // use this string instead
    }

    // else handle the case where parsed_text.street was produced but
    // no parsed_text.name is produced.
    // additionally, handle the case where parsed_text.number is present.
    // note: the addressit module may also produce parsed_text.unit info;
    // for now, we discard that information as we don't have an appropriate
    // place to store it.
    else if( clean.parsed_text.hasOwnProperty('street') ){
      text = [
        clean.parsed_text.number,
        clean.parsed_text.street
      ].filter(function(el){ return el; }) // remove empty elements
       .join(' ');
    }
  }

  // always set 'clean.tokens*' arrays for consistency and to avoid upstream errors.
  clean.tokens = [];
  clean.tokens_complete = [];
  clean.tokens_incomplete = [];

  // sanity check that the text is valid.
  if( check.nonEmptyString( text ) ){

    // split according to the regex used in the elasticsearch tokenizer
    // see: https://github.com/pelias/schema/blob/master/settings.js
    // see: settings.analysis.tokenizer.peliasNameTokenizer
    clean.tokens = text
      .split(/[\s,\\\/]+/)                 // split on delimiters
      .filter(function(el){ return el; }); // remove empty elements
  }

  /**
    the following section splits the tokens into two arrays called
    'tokens_complete' and 'tokens_incomplete'.

    it also strips any tokens from 'tokens_incomplete' which might not
    match the ngrams index (such as single grams not stored in the index).
  **/

  // split the tokens into 'complete' and 'incomplete'.
  if( clean.tokens.length ){

    // if the parser ran successfully then all tokens are complete,
    // simply copy them from clean.tokens
    if( inputParserRanSuccessfully ){

      // all these tokens are complete!
      clean.tokens_complete = clean.tokens.slice();

    // user hasn't finished typing yet
    } else {

      // make a copy of the tokens and remove the last element
      var tokensCopy = clean.tokens.slice(),
          lastToken = tokensCopy.pop();

      // set all but the last token as 'complete'
      clean.tokens_complete = tokensCopy;

      /**
        if the last token is a single non-numeric character then we must discard it.

        at time of writing, single non-numeric ngrams are not stored in the index,
        so sending them as part of the query would result in 0 documents being returned.
      **/
      if( lastToken && ( lastToken.length > 1 || lastToken.match(/[0-9]/) ) ){
        clean.tokens_incomplete = [ lastToken ];
      }
    }

  }

  return messages;
}

// export function
module.exports = sanitize;
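
A minimal sketch of calling the new sanitizer directly; the require path is illustrative and the expected values mirror the unit tests added further down in this changeset.

var sanitizer = require('./sanitiser/_tokenizer'); // illustrative path

var clean = { text: '30 west 26th street new' };
var messages = sanitizer({}, clean);

// clean.tokens            => [ '30', 'west', '26th', 'street', 'new' ]
// clean.tokens_complete   => [ '30', 'west', '26th', 'street' ]
// clean.tokens_incomplete => [ 'new' ]
// messages                => { errors: [], warnings: [] }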
@@ -0,0 +1,155 @@
module.exports = {
  'query': {
    'filtered': {
      'query': {
        'bool': {
          'must': [{
            'match': {
              'name.default': {
                'analyzer': 'peliasQueryFullToken',
                'type': 'phrase',
                'boost': 1,
                'slop': 3,
                'query': 'k road'
              }
            }
          }],
          'should': [
            {
              'match': {
                'address_parts.street': {
                  'query': 'k road',
                  'boost': 5,
                  'analyzer': 'peliasStreet'
                }
              }
            }, {
              'match': {
                'parent.country': {
                  'query': 'laird',
                  'boost': 800,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.region': {
                  'query': 'laird',
                  'boost': 600,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.region_a': {
                  'query': 'laird',
                  'boost': 600,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.county': {
                  'query': 'laird',
                  'boost': 400,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.borough': {
                  'analyzer': 'peliasAdmin',
                  'boost': 600,
                  'query': 'laird'
                }
              }
            }, {
              'match': {
                'parent.localadmin': {
                  'query': 'laird',
                  'boost': 200,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.locality': {
                  'query': 'laird',
                  'boost': 200,
                  'analyzer': 'peliasAdmin'
                }
              }
            }, {
              'match': {
                'parent.neighbourhood': {
                  'query': 'laird',
                  'boost': 200,
                  'analyzer': 'peliasAdmin'
                }
              }
            },
            {
              'match': {
                'phrase.default': {
                  'analyzer' : 'peliasPhrase',
                  'type' : 'phrase',
                  'boost' : 1,
                  'slop' : 3,
                  'query' : 'k road'
                }
              }
            },
            {
              'function_score': {
                'query': {
                  'match': {
                    'name.default': {
                      'analyzer': 'peliasQueryFullToken',
                      'query': 'k road'
                    }
                  }
                },
                'max_boost': 20,
                'score_mode': 'first',
                'boost_mode': 'replace',
                'functions': [{
                  'field_value_factor': {
                    'modifier': 'log1p',
                    'field': 'popularity',
                    'missing': 1
                  },
                  'weight': 1
                }]
              }
            },{
              'function_score': {
                'query': {
                  'match': {
                    'name.default': {
                      'analyzer': 'peliasQueryFullToken',
                      'query': 'k road'
                    }
                  }
                },
                'max_boost': 20,
                'score_mode': 'first',
                'boost_mode': 'replace',
                'functions': [{
                  'field_value_factor': {
                    'modifier': 'log1p',
                    'field': 'population',
                    'missing': 1
                  },
                  'weight': 3
                }]
              }
            }]
        }
      }
    }
  },
  'sort': [ '_score' ],
  'size': 20,
  'track_scores': true
};
@@ -1,150 +0,0 @@
var parser = require('../../../helper/text_parser');

var type_mapping = require('../../../helper/type_mapping');
var layers_map = type_mapping.layer_mapping;

module.exports.tests = {};

module.exports.tests.interface = function(test, common) {
  test('interface', function(t) {
    t.equal(typeof parser.get_parsed_address, 'function', 'valid function');
    t.equal(typeof parser.get_layers, 'function', 'valid function');
    t.end();
  });
};

module.exports.tests.split_on_comma = function(test, common) {
  var queries = [
    { name: 'soho', admin_parts: 'new york' },
    { name: 'chelsea', admin_parts: 'london' },
    { name: '123 main', admin_parts: 'new york' }
  ];

  queries.forEach(function (query) {
    test('naive parsing ' + query, function(t) {
      var address = parser.get_parsed_address(query.name + ', ' + query.admin_parts);

      t.equal(typeof address, 'object', 'valid object');
      t.equal(address.name, query.name, 'name set correctly to ' + address.name);
      t.equal(address.admin_parts, query.admin_parts, 'admin_parts set correctly to ' + address.admin_parts);
      t.end();
    });

    test('naive parsing ' + query + ' without spaces', function(t) {
      var address = parser.get_parsed_address(query.name + ',' + query.admin_parts);

      t.equal(typeof address, 'object', 'valid object');
      t.equal(address.name, query.name, 'name set correctly to ' + address.name);
      t.equal(address.admin_parts, query.admin_parts, 'admin_parts set correctly to ' + address.admin_parts);
      t.end();
    });
  });
};

module.exports.tests.parse_three_chars_or_less = function(test, common) {
  var chars_queries = ['a', 'bb', 'ccc'];
  var num_queries = ['1', '12', '123'];
  var alphanum_q = ['a1', '1a2', '12c'];

  var queries = chars_queries.concat(num_queries).concat(alphanum_q);
  queries.forEach(function(query) {
    test('query length < 3 (' + query + ')', function(t) {
      var address = parser.get_parsed_address(query);
      var target_layer = layers_map.coarse;
      var layers = parser.get_layers(query);

      t.equal(typeof address, 'object', 'valid object');
      t.deepEqual(layers, target_layer, 'admin_parts set correctly to ' + target_layer.join(', '));
      t.end();
    });
  });
};

module.exports.tests.parse_one_token = function(test, common) {
  test('query with one token', function (t) {
    var address = parser.get_parsed_address('yugolsavia');
    t.equal(address, null, 'nothing address specific detected');
    t.end();
  });
  test('query with two tokens, no numbers', function (t) {
    var address = parser.get_parsed_address('small town');
    t.equal(address, null, 'nothing address specific detected');
    t.end();
  });
  test('query with two tokens, number first', function (t) {
    var address = parser.get_parsed_address('123 main');
    t.equal(address, null, 'nothing address specific detected');
    t.end();
  });
  test('query with two tokens, number second', function (t) {
    var address = parser.get_parsed_address('main 123');
    t.equal(address, null, 'nothing address specific detected');
    t.end();
  });
  test('query with many tokens', function(t) {
    var address = parser.get_parsed_address('main particle new york');
    t.equal(address, null, 'nothing address specific detected');
    t.end();
  });
};

module.exports.tests.parse_address = function(test, common) {
  test('valid address, house number', function(t) {
    var query_string = '123 main st new york ny';
    var address = parser.get_parsed_address(query_string);

    t.equal(typeof address, 'object', 'valid object for the address');
    t.equal(address.number, '123', 'parsed house number');
    t.equal(address.street, 'main st', 'parsed street');
    t.deepEqual(address.regions, ['new york'], 'parsed city');
    t.equal(address.state, 'NY', 'parsed state');
    t.end();
  });
  test('valid address, zipcode', function(t) {
    var query_string = '123 main st new york ny 10010';
    var address = parser.get_parsed_address(query_string);

    t.equal(typeof address, 'object', 'valid object for the address');
    t.equal(address.number, '123', 'parsed house number');
    t.equal(address.street, 'main st', 'parsed street');
    t.deepEqual(address.regions, ['new york'], 'parsed city');
    t.equal(address.state, 'NY', 'parsed state');
    t.equal(address.postalcode, '10010', 'parsed zip is a string');
    t.end();
  });
  test('valid address with leading 0s in zipcode', function(t) {
    var query_string = '339 W Main St, Cheshire, 06410';
    var address = parser.get_parsed_address(query_string);

    console.log(address);

    t.equal(typeof address, 'object', 'valid object for the address');
    t.equal(address.street, 'W Main St', 'parsed street');
    t.deepEqual(address.regions, ['Cheshire'], 'parsed city');
    t.equal(address.postalcode, '06410', 'parsed zip');
    t.end();
  });
  test('valid address without spaces after commas', function(t) {
    var query_string = '339 W Main St,Lancaster,PA';
    var address = parser.get_parsed_address(query_string);

    t.equal(typeof address, 'object', 'valid object for the address');
    t.equal(address.number, '339', 'parsed house number');
    t.equal(address.street, 'W Main St', 'parsed street');
    t.deepEqual(address.regions, ['Lancaster'], 'parsed city');
    t.deepEqual(address.state, 'PA', 'parsed state');
    t.end();
  });
};


module.exports.all = function (tape, common) {

  function test(name, testFunction) {
    return tape('QUERY PARSING: ' + name, testFunction);
  }

  for( var testCase in module.exports.tests ){
    module.exports.tests[testCase](test, common);
  }
};
@@ -0,0 +1,457 @@
var sanitiser = require('../../../sanitiser/_tokenizer');

module.exports.tests = {};

module.exports.tests.sanity_checks = function(test, common) {
  test('clean.text not set', function(t) {

    var clean = {}; // clean.text not set
    var messages = sanitiser({}, clean);

    // no tokens produced
    t.deepEquals(clean.tokens, [], 'no tokens');
    t.deepEquals(clean.tokens_complete, [], 'no tokens');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('clean.text not a string', function(t) {

    var clean = { text: {} }; // clean.text not a string
    var messages = sanitiser({}, clean);

    // no tokens produced
    t.deepEquals(clean.tokens, [], 'no tokens');
    t.deepEquals(clean.tokens_complete, [], 'no tokens');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('empty string', function(t) {

    var clean = { text: '' };
    var messages = sanitiser({}, clean);

    // no tokens produced
    t.deepEquals(clean.tokens, [], 'no tokens');
    t.deepEquals(clean.tokens_complete, [], 'no tokens');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('clean.parsed_text set but clean.parsed_text.name invalid', function(t) {

    var clean = { parsed_text: { text: {} } };
    var messages = sanitiser({}, clean);

    // no tokens produced
    t.deepEquals(clean.tokens, [], 'no tokens');
    t.deepEquals(clean.tokens_complete, [], 'no tokens');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('favor clean.parsed_text.name over clean.text', function(t) {

    var clean = { parsed_text: { name: 'foo' }, text: 'bar' };
    var messages = sanitiser({}, clean);

    // favor clean.parsed_text.name over clean.text
    t.deepEquals(clean.tokens, [ 'foo' ], 'use clean.parsed_text.name');
    t.deepEquals(clean.tokens_complete, [ 'foo' ], 'use clean.parsed_text.name');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('favor clean.parsed_text street data over clean.text', function(t) {

    var clean = { parsed_text: { number: '190', street: 'foo st' }, text: 'bar' };
    var messages = sanitiser({}, clean);

    // favor clean.parsed_text street data over clean.text
    t.deepEquals(clean.tokens, [ '190', 'foo', 'st' ], 'use street name + number');
    t.deepEquals(clean.tokens_complete, [ '190', 'foo', 'st' ], 'use street name + number');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('favor clean.parsed_text.name over clean.parsed_text street data', function(t) {

    var clean = { parsed_text: { number: '190', street: 'foo st', name: 'foo' }, text: 'bar' };
    var messages = sanitiser({}, clean);

    // favor clean.parsed_text.name over all other variables
    t.deepEquals(clean.tokens, [ 'foo' ], 'use clean.parsed_text.name');
    t.deepEquals(clean.tokens_complete, [ 'foo' ], 'use clean.parsed_text.name');
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.space_delimiter = function(test, common) {
  test('space delimiter - simple', function(t) {

    var clean = { text: '30 west 26th street new york' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      '30',
      'west',
      '26th',
      'street',
      'new',
      'york'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      '30',
      'west',
      '26th',
      'street',
      'new'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'york'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('space delimiter - multiple spaces / other whitespace', function(t) {

    var clean = { text: ' 30 west \t26th \nstreet new york ' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      '30',
      'west',
      '26th',
      'street',
      'new',
      'york'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      '30',
      'west',
      '26th',
      'street',
      'new'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'york'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.comma_delimiter = function(test, common) {
  test('comma delimiter - simple', function(t) {

    var clean = { text: '30 west 26th street, new york' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      '30',
      'west',
      '26th',
      'street',
      'new',
      'york'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      '30',
      'west',
      '26th',
      'street',
      'new'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'york'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('comma delimiter - multiple commas', function(t) {

    var clean = { text: ',30 west 26th street,,, new york,' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      '30',
      'west',
      '26th',
      'street',
      'new',
      'york'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      '30',
      'west',
      '26th',
      'street',
      'new'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'york'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.forward_slash_delimiter = function(test, common) {
  test('forward slash delimiter - simple', function(t) {

    var clean = { text: 'Bedell Street/133rd Avenue' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'Bedell',
      'Street',
      '133rd',
      'Avenue'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      'Bedell',
      'Street',
      '133rd'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'Avenue'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('forward slash - multiple slashes', function(t) {

    var clean = { text: '/Bedell Street//133rd Avenue/' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'Bedell',
      'Street',
      '133rd',
      'Avenue'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      'Bedell',
      'Street',
      '133rd'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      'Avenue'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.final_token_single_gram = function(test, common) {
  test('final token single gram - numeric', function(t) {

    var clean = { text: 'grolmanstrasse 1' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'grolmanstrasse',
      '1'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      'grolmanstrasse'
    ], 'tokens produced');

    // last token marked as 'incomplete'
    t.deepEquals(clean.tokens_incomplete, [
      '1'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('final token single gram - non-numeric', function(t) {

    var clean = { text: 'grolmanstrasse a' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'grolmanstrasse',
      'a'
    ], 'tokens produced');

    // all but last token marked as 'complete'
    t.deepEquals(clean.tokens_complete, [
      'grolmanstrasse'
    ], 'tokens produced');

    // last token removed!
    t.deepEquals(clean.tokens_incomplete, [], 'no tokens');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.back_slash_delimiter = function(test, common) {
  test('back slash delimiter - simple', function(t) {

    var clean = { text: 'Bedell Street\\133rd Avenue' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'Bedell',
      'Street',
      '133rd',
      'Avenue'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
  test('back slash - multiple slashes', function(t) {

    var clean = { text: '\\Bedell Street\\\\133rd Avenue\\' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'Bedell',
      'Street',
      '133rd',
      'Avenue'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.tests.mixed_delimiter = function(test, common) {
  test('mixed delimiters', function(t) {

    var clean = { text: ',/Bedell Street\\, \n\t ,\\//133rd Avenue, /\n/' };
    var messages = sanitiser({}, clean);

    // tokens produced
    t.deepEquals(clean.tokens, [
      'Bedell',
      'Street',
      '133rd',
      'Avenue'
    ], 'tokens produced');

    // no errors/warnings produced
    t.deepEquals(messages.errors, [], 'no errors');
    t.deepEquals(messages.warnings, [], 'no warnings');

    t.end();
  });
};

module.exports.all = function (tape, common) {
  function test(name, testFunction) {
    return tape('SANITISER _tokenizer: ' + name, testFunction);
  }

  for( var testCase in module.exports.tests ){
    module.exports.tests[testCase](test, common);
  }
};