希望可以从各大技术论坛抓取自己感兴趣的问题。
|-- env
|-- csdn.env
|-- segmentfault.env
|-- index.js
复制代码
代码逻辑特别简单,主要流程如下:
因为puppeteer很容易模拟用户在浏览器上的行为,所以DEMO的核心在于如何获取数据,或者说如何实现get_news
方法。
const puppeteer = require("puppeteer");

// Target page to scrape. The original snippet referenced `url` without
// ever defining it, which throws a ReferenceError at page.goto — define
// it explicitly (overridable through the environment).
const url = process.env.TARGET_URL || "https://bbs.csdn.net/";

(async () => {
  // 1. Open browser
  const browser = await puppeteer.launch({});
  // 2. Create a new page
  const page = await browser.newPage();
  // 3. Go to the target website and wait until network activity settles
  await page.goto(url, { waitUntil: "networkidle2" });
  // 4. Get data — get_news runs inside the page context
  let data = await page.evaluate(get_news);
  // 5. Print out the data in the console
  console.log(data);
  // 6. Close browser
  await browser.close();
})();

// Getting news — placeholder; executed in the page context by
// page.evaluate, so it may use the page's DOM (and jQuery if present).
function get_news() {
  // To do something to get news
}
复制代码
以CSDN为例,如下图所示,在网页里我们可以很容易地通过原生的DOM操作document.querySelector(selector)
或者jQuery的DOM操作$(selector)
来找到页面上的元素,从而获取页面信息。puppeteer的page.evaluate
方法同时支持原生的DOM操作和jQuery的DOM操作,因此我们获取页面数据就会变得很容易。具体代码如下所示。
// Runs inside the page context: pairs each forum title anchor with the
// date element at the same index and returns {title, link, date} records.
function get_news() {
  const anchors = $(".forums_title").get();
  const dateCells = $(".forums_author em").get();
  return anchors.map((anchor, idx) => ({
    title: anchor.text,
    link: anchor.href,
    date: dateCells[idx].textContent
  }));
}
复制代码
现在运行代码可以在控制台中打印出抓取到的网页数据,如下图所示。你同样可以将数据写入到数据库。
代码里还有很多的细节实现,因为都有详细的注释我就不一一展开了,感兴趣的小伙伴可以阅读后面的代码。主要包括的技术细节如下:
page.evaluate
里用console调试。我在前端上还是个小白,代码质量可能不高,如果有什么问题希望大家在评论区里及时指出,帮助小白成长,感激涕零!!!
1. index.js
const puppeteer = require("puppeteer");
const {
  resolve
} = require("path");

// Entry point: loads the site-specific env file named by PATH_NAME,
// then crawls every configured tag page by page until a page yields
// no matching question, printing the hits to the console.
// (The original paste had mangled arrows `= >`, which are syntax errors.)
(async (path_name, start_time) => {
  // 1. Resolve and load the environment file: env/<path_name>
  let dotenvPath = resolve(__dirname, "env", path_name);
  require("dotenv").config({
    path: dotenvPath
  });
  // 2. Open browser
  const browser = await puppeteer.launch({});
  // 3. Create a new page
  const page = await browser.newPage();
  // Relay console messages emitted inside page.evaluate to this process.
  // Note: the original template literal used `$ {i}` (with a space),
  // which prints literally instead of interpolating.
  page.on("console", msg => {
    for (let i = 0; i < msg.args().length; ++i) {
      console.log(`${i}: ${msg.args()[i]}`);
    }
  });
  // 4. Read scraping parameters from the environment
  let tags = JSON.parse(process.env.TAGS);
  let titles = process.env.SELECTOR_TITLES;
  let dates = process.env.SELECTOR_DATES;
  let keywords = JSON.parse(process.env.KEYWORDS);
  let time_interval = process.env.TIME_INTERVAL;
  let para = { path_name, start_time, time_interval, titles, dates, keywords };
  // Build the list URL for a given tag and page index
  const get_news_url = (tag, pageIndex) =>
    process.env.LIST_URL.replace("{tag}", tag).replace("{pageIndex}", pageIndex);
  // 5. Crawl each tag sequentially. All tags share the single `page`,
  //    so running them concurrently (the original Promise.all) races on
  //    page.goto/page.evaluate; sequential iteration is the correct fix.
  for (const tag of tags) {
    let i = 0;
    while (true) {
      // 1) Go to the specified page
      await page.goto(get_news_url(tag, ++i), { waitUntil: "networkidle2" });
      // 2) Get data by function get_news (runs in the page context)
      let _titles = await page.evaluate(get_news, para);
      // 3) Stop the loop if it can't find the required data
      if (_titles.length === 0) break;
      // 4) Output captured data in console
      console.log(i, get_news_url(tag, i));
      console.log(_titles);
    }
  }
  // 6. Close browser
  await browser.close();
})(process.env.PATH_NAME, process.env.START_TIME);
// Getting news
// Getting news. Runs inside the page context via page.evaluate — `$` is
// the page's jQuery. Returns the list entries whose release time falls
// inside the configured window and whose title matches a keyword.
// Fixes vs. the original paste: `async`/`function` were split across
// lines (breaking the async declaration), `= >` was a syntax error, and
// the keyword check referenced an undefined `a` instead of `title`.
async function get_news(para) {
  // 1. Unpack the parameters forwarded from the Node side
  let { path_name, start_time, time_interval, titles, dates, keywords } =
    await Promise.resolve(para);
  // Extract the release time of an issue; each site stores it differently.
  const get_release_time = dom => {
    if (path_name === "csdn.env") return dom.textContent;
    // SegmentFault keeps a Unix timestamp (seconds) in data-created.
    if (path_name === "segmentfault.env") return new Date(dom.dataset.created * 1000);
  };
  // True when `time` lies within (start_time, start_time + time_interval).
  // time_interval arrives as a string from the env; `<` coerces it.
  const validate_time = (time, start_time) => {
    let time_diff = (new Date(time)) - (new Date(start_time));
    return (time_diff > 0) && (time_diff < time_interval);
  };
  // True when the title matches at least one keyword pattern
  const validate_keyword = (keywords, title) =>
    !!keywords.find(keyword => (new RegExp(keyword)).test(title));
  // 2. Traverse the page data to find the required data
  let result = [];
  $(titles).map((i, title) => {
    // 1) Verify that the data is valid in time
    let check_time = validate_time(get_release_time($(dates)[i]), start_time);
    if (!check_time) return;
    // 2) Verify that the data contains the specified keywords
    let check_keyword = validate_keyword(keywords, title.text);
    if (!check_keyword) return;
    result.push({
      title: title.text,
      link: title.href,
      date: get_release_time($(dates)[i]).toString()
    });
  });
  return result;
}
复制代码
2. csdn.env
LIST_URL=https://bbs.csdn.net/forums/{tag}?page={pageIndex}
TAGS=["CSharp","DotNET"]
KEYWORDS=[".net","C#","c#"]
SELECTOR_TITLES=.forums_topic .forums_title
SELECTOR_DATES=.forums_author em
复制代码
3. segmentfault.env
LIST_URL=https://segmentfault.com/questions/unanswered?page={pageIndex}
TAGS=[""]
KEYWORDS=["js","mysql","vue","html","javascript"]
SELECTOR_TITLES=.title a
SELECTOR_DATES=.askDate
复制代码
4. package.json
{
"name": "fetch-question",
"version": "1.0.0",
"description": "fetch questions from internet",
"main": "index.js",
"dependencies": {
"cross-env": "^5.2.0",
"dotenv": "^7.0.0",
"puppeteer": "^1.13.0"
},
"devDependencies": {},
"scripts": {
"csdn:list": "cross-env PATH_NAME=csdn.env START_TIME=2019/3/18 TIME_INTERVAL=172800000 node index.js",
"segmentfault:list": "cross-env PATH_NAME=segmentfault.env START_TIME=2019/3/18 TIME_INTERVAL=172800000 node index.js"
},
"author": "linli",
"license": "ISC"
}
复制代码