3 unstable versions

| Version | Date |
|---|---|
| 0.2.0 | November 8, 2022 |
| 0.1.1 | March 26, 2021 |
| 0.1.0 | March 23, 2021 |
XPATH Scraper
Simplifies scraping websites with XPATH. It currently uses my own xpath parser, which is incomplete, undocumented, and was originally written to teach myself about parsing.
Below is a simple example, which is also available in the examples folder:
```rust
use std::io::Cursor;

use scraper_macros::Scraper;
use scraper_main::{
    xpather,
    ConvertFromValue,
    ScraperMain,
};

#[derive(Debug, Scraper)]
pub struct RedditList(
    // Uses XPATH to find the item containers
    #[scrape(xpath = r#"//div[contains(@class, "Post") and not(contains(@class, "promotedlink"))]"#)]
    Vec<RedditListItem>,
);

#[derive(Debug, Scraper)]
pub struct RedditListItem {
    // URL of the post
    #[scrape(xpath = r#".//a[@data-click-id="body"]/@href"#)]
    pub url: Option<String>,

    // Title of the post
    #[scrape(xpath = r#".//a[@data-click-id="body"]/div/h3/text()"#)]
    pub title: Option<String>,

    // When it was posted
    #[scrape(xpath = r#".//a[@data-click-id="timestamp"]/text()"#)]
    pub timestamp: Option<String>,

    // Amount of comments
    #[scrape(xpath = r#".//a[@data-click-id="comments"]/span/text()"#)]
    pub comment_count: Option<String>,

    // Vote count
    #[scrape(xpath = r#"./div[1]/div/div/text()"#)]
    pub votes: Option<String>,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Request the subreddit page.
    let resp = reqwest::get("https://www.reddit.com/r/nocontextpics/").await?;
    let data = resp.text().await?;

    // Parse the response body into a Document.
    let document = xpather::parse_doc(&mut Cursor::new(data));

    // Scrape the RedditList struct from the document.
    let list = RedditList::scrape(&document, None)?;

    // Output the scraped result.
    println!("{:#?}", list);

    Ok(())
}
```
Dependencies: ~1.5MB, ~35K SLoC
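For reference, here is a minimal sketch of a `Cargo.toml` that could build the example above. The crate names are inferred from the `use` paths in the example (`scraper_main`, `scraper_macros`) plus `reqwest` and `tokio`; every version number and feature flag is illustrative, not taken from this package's actual manifest.

```toml
# Hypothetical manifest for the example above; versions are illustrative.
[package]
name = "reddit-scrape-example"   # hypothetical example crate name
version = "0.1.0"
edition = "2021"

[dependencies]
scraper-main = "0.2"     # ScraperMain, ConvertFromValue, and the xpather module used in the example
scraper-macros = "0.2"   # provides #[derive(Scraper)]
reqwest = "0.11"         # HTTP client used to fetch the page
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }  # async runtime for #[tokio::main]
```

The `macros` and `rt-multi-thread` features of tokio are what enable the `#[tokio::main]` attribute and the multi-threaded runtime the example relies on.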