forked from extern/se-scraper

Commit 5bf7c94b9a (parent d4d06f7d67): new version
README.md (62 lines changed)

````diff
@@ -108,6 +108,13 @@ const se_scraper = require('se-scraper');
     search_engine: 'google',
     keywords: ['news', 'se-scraper'],
     num_pages: 1,
+    // add some cool google search settings
+    google_settings: {
+        gl: 'us', // The gl parameter determines the Google country to use for the query.
+        hl: 'en', // The hl parameter determines the Google UI language to return results.
+        start: 0, // Determines the results offset to use, defaults to 0.
+        num: 100, // Determines the number of results to show, defaults to 10. Maximum is 100.
+    },
 };
 
 var scraper = new se_scraper.ScrapeManager(browser_config);
@@ -286,14 +293,13 @@ Use **se-scraper** by calling it with a script such as the one below.
 ```js
 const se_scraper = require('se-scraper');
 
-let config = {
+// those options need to be provided on startup
+// and cannot give to se-scraper on scrape() calls
+let browser_config = {
     // the user agent to scrape with
     user_agent: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
     // if random_user_agent is set to True, a random user agent is chosen
-    random_user_agent: true,
-    // how long to sleep between requests. a random sleep interval within the range [a,b]
-    // is drawn before every request. empty string for no sleeping.
-    sleep_range: '',
+    random_user_agent: false,
     // whether to start the browser in headless mode
     headless: true,
     // whether debug information should be printed
@@ -304,11 +310,6 @@ let config = {
     debug_level: 1,
     // specify flags passed to chrome here
     chrome_flags: [],
-    // path to output file, data will be stored in JSON
-    output_file: 'examples/results/baidu.json',
-    // whether to prevent images, css, fonts from being loaded
-    // will speed up scraping a great deal
-    block_assets: false,
     // path to js module that extends functionality
     // this module should export the functions:
     // get_browser, handle_metadata, close_browser
@@ -323,14 +324,6 @@ let config = {
     // socks5://78.94.172.42:1080
     // http://118.174.233.10:48400
     proxy_file: '',
-    // check if headless chrome escapes common detection techniques
-    // this is a quick test and should be used for debugging
-    test_evasion: false,
-    apply_evasion_techniques: true,
-    // log ip address data
-    log_ip_address: false,
-    // log http headers
-    log_http_headers: false,
     puppeteer_cluster_config: {
         timeout: 10 * 60 * 1000, // max timeout set to 10 minutes
         monitor: false,
@@ -340,18 +333,43 @@ let config = {
 };
 
 (async () => {
+    // scrape config can change on each scrape() call
     let scrape_config = {
         // which search engine to scrape
-        search_engine: 'bing',
+        search_engine: 'google',
         // an array of keywords to scrape
         keywords: ['cat', 'mouse'],
-        // alternatively you can specify a keyword_file. this overwrites the keywords array
-        keyword_file: '',
         // the number of pages to scrape for each keyword
         num_pages: 2,
+
+        // OPTIONAL PARAMS BELOW:
+        google_settings: {
+            gl: 'us', // The gl parameter determines the Google country to use for the query.
+            hl: 'fr', // The hl parameter determines the Google UI language to return results.
+            start: 0, // Determines the results offset to use, defaults to 0.
+            num: 100, // Determines the number of results to show, defaults to 10. Maximum is 100.
+        },
+        // instead of keywords you can specify a keyword_file. this overwrites the keywords array
+        keyword_file: '',
+        // how long to sleep between requests. a random sleep interval within the range [a,b]
+        // is drawn before every request. empty string for no sleeping.
+        sleep_range: '',
+        // path to output file, data will be stored in JSON
+        output_file: 'output.json',
+        // whether to prevent images, css, fonts from being loaded
+        // will speed up scraping a great deal
+        block_assets: false,
+        // check if headless chrome escapes common detection techniques
+        // this is a quick test and should be used for debugging
+        test_evasion: false,
+        apply_evasion_techniques: true,
+        // log ip address data
+        log_ip_address: false,
+        // log http headers
+        log_http_headers: false,
     };
 
-    let results = await se_scraper.scrape(config, scrape_config);
+    let results = await se_scraper.scrape(browser_config, scrape_config);
     console.dir(results, {depth: null, colors: true});
 })();
 ```
````
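For quick reference, a minimal sketch of the configuration split this commit introduces, distilled from the README diff above (the option values here are illustrative):

```js
const se_scraper = require('se-scraper');

// Options fixed at startup; they cannot change between scrape() calls.
let browser_config = {
    random_user_agent: false,
    headless: true,
    debug_level: 1,
};

(async () => {
    // Options that may change on every scrape() call.
    let scrape_config = {
        search_engine: 'google',
        keywords: ['news', 'se-scraper'],
        num_pages: 1,
        // optional, engine-specific parameters
        google_settings: {
            gl: 'us',  // Google country to use for the query
            hl: 'en',  // Google UI language
            start: 0,  // results offset
            num: 100,  // results per page, maximum 100
        },
    };

    let results = await se_scraper.scrape(browser_config, scrape_config);
    console.dir(results, {depth: null, colors: true});
})();
```

Startup-only options stay in browser_config; anything that may differ between calls, such as the engine, the keywords, or google_settings, now travels in scrape_config.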
TODO.md (2 lines changed)

```diff
@@ -42,7 +42,7 @@
 
 ### 11.6.2019
 - TODO: fix amazon scraping
-- change api of remaining test cases
+- change api of remaining test cases [done]
 - TODO: implement custom search engine parameters on scrape()
 
 ### TODO:
```
```diff
@@ -10,6 +10,13 @@ const se_scraper = require('./../src/node_scraper.js');
     search_engine: 'google',
     keywords: ['news', 'se-scraper'],
     num_pages: 1,
+    // add some cool google search settings
+    google_settings: {
+        gl: 'us', // The gl parameter determines the Google country to use for the query.
+        hl: 'en', // The hl parameter determines the Google UI language to return results.
+        start: 0, // Determines the results offset to use, defaults to 0.
+        num: 100, // Determines the number of results to show, defaults to 10. Maximum is 100.
+    },
 };
 
 var scraper = new se_scraper.ScrapeManager(browser_config);
```
output.json (new file, 3206 lines): file diff suppressed because it is too large.
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "se-scraper",
-  "version": "1.3.0",
+  "version": "1.3.1",
   "description": "A module using puppeteer to scrape several search engines such as Google, Duckduckgo, Bing or Baidu",
   "homepage": "https://scrapeulous.com/",
   "main": "index.js",
```
run.js (57 lines changed)

```diff
@@ -1,13 +1,12 @@
 const se_scraper = require('./index.js');
 
-let config = {
+// those options need to be provided on startup
+// and cannot give to se-scraper on scrape() calls
+let browser_config = {
     // the user agent to scrape with
     user_agent: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
     // if random_user_agent is set to True, a random user agent is chosen
-    random_user_agent: true,
-    // how long to sleep between requests. a random sleep interval within the range [a,b]
-    // is drawn before every request. empty string for no sleeping.
-    sleep_range: '',
+    random_user_agent: false,
     // whether to start the browser in headless mode
     headless: true,
     // whether debug information should be printed
@@ -18,11 +17,6 @@ let config = {
     debug_level: 1,
     // specify flags passed to chrome here
     chrome_flags: [],
-    // path to output file, data will be stored in JSON
-    output_file: 'examples/results/baidu.json',
-    // whether to prevent images, css, fonts from being loaded
-    // will speed up scraping a great deal
-    block_assets: false,
     // path to js module that extends functionality
     // this module should export the functions:
     // get_browser, handle_metadata, close_browser
@@ -37,35 +31,52 @@ let config = {
     // socks5://78.94.172.42:1080
     // http://118.174.233.10:48400
     proxy_file: '',
-    // check if headless chrome escapes common detection techniques
-    // this is a quick test and should be used for debugging
-    test_evasion: false,
-    apply_evasion_techniques: true,
-    // log ip address data
-    log_ip_address: false,
-    // log http headers
-    log_http_headers: false,
     puppeteer_cluster_config: {
         timeout: 10 * 60 * 1000, // max timeout set to 10 minutes
         monitor: false,
         concurrency: 1, // one scraper per tab
-        maxConcurrency: 1, // scrape with 2 tabs
+        maxConcurrency: 1, // scrape with 1 tab
     }
 };
 
 (async () => {
+    // scrape config can change on each scrape() call
     let scrape_config = {
         // which search engine to scrape
-        search_engine: 'bing',
+        search_engine: 'google',
         // an array of keywords to scrape
         keywords: ['cat', 'mouse'],
-        // alternatively you can specify a keyword_file. this overwrites the keywords array
-        keyword_file: '',
         // the number of pages to scrape for each keyword
         num_pages: 2,
 
+        // OPTIONAL PARAMS BELOW:
+        google_settings: {
+            gl: 'us', // The gl parameter determines the Google country to use for the query.
+            hl: 'fr', // The hl parameter determines the Google UI language to return results.
+            start: 0, // Determines the results offset to use, defaults to 0.
+            num: 100, // Determines the number of results to show, defaults to 10. Maximum is 100.
+        },
+        // instead of keywords you can specify a keyword_file. this overwrites the keywords array
+        keyword_file: '',
+        // how long to sleep between requests. a random sleep interval within the range [a,b]
+        // is drawn before every request. empty string for no sleeping.
+        sleep_range: '',
+        // path to output file, data will be stored in JSON
+        output_file: 'examples/results/advanced.json',
+        // whether to prevent images, css, fonts from being loaded
+        // will speed up scraping a great deal
+        block_assets: false,
+        // check if headless chrome escapes common detection techniques
+        // this is a quick test and should be used for debugging
+        test_evasion: false,
+        apply_evasion_techniques: true,
+        // log ip address data
+        log_ip_address: false,
+        // log http headers
+        log_http_headers: false,
     };
 
-    let results = await se_scraper.scrape(config, scrape_config);
+    let results = await se_scraper.scrape(browser_config, scrape_config);
     console.dir(results, {depth: null, colors: true});
 })();
+
```
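run.js keeps maxConcurrency at 1 and only corrects the misleading comment. As a hedged aside (an assumption, not part of this commit), raising maxConcurrency should let the puppeteer cluster work on several keywords at once, one scraper per tab, as the comments in run.js suggest:

```js
const se_scraper = require('se-scraper');

// Hypothetical variation, not part of this commit: two tabs in parallel.
let browser_config = {
    headless: true,
    puppeteer_cluster_config: {
        timeout: 10 * 60 * 1000, // max timeout set to 10 minutes
        monitor: false,
        concurrency: 1,    // one scraper per tab
        maxConcurrency: 2, // assumed: scrape with 2 tabs (this commit keeps it at 1)
    }
};

(async () => {
    let scrape_config = {
        search_engine: 'google',
        keywords: ['cat', 'mouse'],
        num_pages: 1,
    };
    let results = await se_scraper.scrape(browser_config, scrape_config);
    console.dir(results, {depth: null, colors: true});
})();
```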
src/node_scraper.js

```diff
@@ -251,7 +251,7 @@ class ScrapeManager {
 
         this.cluster.on('taskerror', (err, data) => {
             console.log(`Error while scraping ${data}: ${err.message}`);
-            console.log(err)
+            console.log(err);
         });
 
     }
@@ -261,9 +261,13 @@ class ScrapeManager {
      * Scrapes the keywords specified by the config.
      */
     async scrape(scrape_config = {}) {
-        this.config.keywords = scrape_config.keywords;
-        this.config.num_pages = scrape_config.num_pages;
-        this.config.search_engine = scrape_config.search_engine;
+        if (!scrape_config.keywords && !scrape_config.keyword_file) {
+            console.error('Either keywords or keyword_file must be supplied to scrape()')
+            return;
+        }
+
+        Object.assign(this.config, scrape_config);
+
 
         var results = {};
         var num_requests = 0;
@@ -283,7 +287,7 @@ class ScrapeManager {
             // const page = await this.browser.newPage();
             // this.scraper = getScraper(this.config.search_engine, {
             //     config: this.config,
-            //     context: context,
+            //     context: {},
             //     pluggable: pluggable,
             //     page: page,
             // });
@@ -382,14 +386,9 @@ class ScrapeManager {
         }
 
         return {
-            headers: {
-                'Content-Type': 'text/json',
-            },
             results: results,
             metadata: metadata || {},
-            statusCode: 200
         };
 
     }
 
 /*
@@ -436,7 +435,6 @@ function parseEventData(config) {
     }
 
 
-
 module.exports = {
     ScrapeManager: ScrapeManager,
 };
```
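The rewritten scrape() guards against a missing keyword source and then merges the whole per-call config into the stored one. A standalone sketch of the resulting merge semantics (plain Node, variable names here are illustrative, not library code):

```js
// Stored startup config and an incoming per-call scrape config.
let config = { search_engine: 'bing', num_pages: 1, headless: true };

let scrape_config = {
    search_engine: 'google',
    keywords: ['news'],
    google_settings: { gl: 'us', hl: 'en' },
};

// Same guard as in the new scrape(): a keyword source is required.
if (!scrape_config.keywords && !scrape_config.keyword_file) {
    console.error('Either keywords or keyword_file must be supplied to scrape()');
} else {
    // Every own property of scrape_config overrides the stored config.
    Object.assign(config, scrape_config);
}

console.log(config);
// { search_engine: 'google', num_pages: 1, headless: true,
//   keywords: ['news'], google_settings: { gl: 'us', hl: 'en' } }
```

Because Object.assign copies every own property, per-call settings such as google_settings or output_file now override the startup values without scrape() having to list each field explicitly.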
Test files:

```diff
@@ -30,8 +30,6 @@ async function normal_search_test() {
 
     // we test with a callback function to our handler
     function normal_search_test_case(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 2);
 
         for (let query in response.results) {
@@ -113,8 +111,6 @@ async function no_results_test() {
 
     // we test with a callback function to our handler
     function test_case_no_results(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -30,8 +30,6 @@ async function normal_search_test() {
 
     // we test with a callback function to our handler
     function normal_search_test_case(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 4);
 
         for (let query in response.results) {
@@ -35,8 +35,6 @@ async function normal_search_test() {
 
     // we test with a callback function to our handler
     function normal_search_test_case(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 6);
 
         for (let query in response.results) {
@@ -113,8 +111,6 @@ async function no_results_test() {
 
     // we test with a callback function to our handler
     function test_case_no_results(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -164,8 +160,6 @@ async function effective_query_test() {
 
     // we test with a callback function to our handler
     function test_case_effective_query(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -32,8 +32,6 @@ async function normal_search_test() {
 
     // we test with a callback function to our handler
     function normal_search_test_case(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 2);
 
         for (let query in response.results) {
@@ -105,8 +103,6 @@ async function effective_query_test() {
 
     // we test with a callback function to our handler
     function test_case_effective_query(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -32,8 +32,6 @@ async function normal_search_test() {
 
     // we test with a callback function to our handler
     function normal_search_test_case(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 6);
 
         for (let query in response.results) {
@@ -108,8 +106,6 @@ async function no_results_test() {
 
     // we test with a callback function to our handler
     function test_case_no_results(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -159,8 +155,6 @@ async function effective_query_test() {
 
     // we test with a callback function to our handler
     function test_case_effective_query(response) {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 1);
 
         results = response.results;
@@ -34,8 +34,6 @@ function normal_image_search_test_case(err, response) {
     if (err) {
         console.error(err);
     } else {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 2);
 
         for (let query in response.results) {
@@ -35,8 +35,6 @@ function queryargs_search_test_case(err, response) {
     if (err) {
         console.error(err);
     } else {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
         assert.equal(response.metadata.num_requests, 2);
 
         for (let query in response.results) {
@@ -34,8 +34,6 @@ function reuters_search_test_case(err, response) {
     if (err) {
         console.error(err);
     } else {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
 
         for (let query in response.results) {
             let total_rank = 1;
@@ -103,8 +101,6 @@ function cnbc_search_test_case(err, response) {
     if (err) {
         console.error(err);
     } else {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
 
         for (let query in response.results) {
             let total_rank = 1;
@@ -170,8 +166,6 @@ function marketwatch_search_test_case(err, response) {
     if (err) {
         console.error(err);
     } else {
-        assert.equal(response.headers['Content-Type'], 'text/json', 'content type is not text/json');
-        assert.equal(response.statusCode, 200, 'status code must be 200');
 
         for (let query in response.results) {
             let total_rank = 1;
```
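All of the test hunks drop the same two assertions, matching the slimmer object now returned by ScrapeManager.scrape(): results and metadata only, no headers or statusCode. A sketch of the adjusted callback shape, assuming the assert module as in the originals:

```js
const assert = require('assert');

// Updated test callback: responses no longer carry headers or statusCode,
// so only metadata.num_requests and the results object are checked.
function normal_search_test_case(response) {
    assert.equal(response.metadata.num_requests, 2);

    for (let query in response.results) {
        // per-query checks on the scraped result pages go here
    }
}

// Example invocation with a minimal fake response object:
normal_search_test_case({
    metadata: { num_requests: 2 },
    results: { 'news': [] },
});
```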