use std::fmt;

use crate::{
    error::{EntryError, PullError},
    status::PullStatus,
};
use ansi_term::{Color, Style};
use chrono::prelude::*;
use feed_rs::{
    model::{Entry as ChannelEntry, Feed as Channel},
    parser,
};
use serde::{Deserialize, Serialize};
use url::Url;
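
/// A feed subscription: the channel url plus the entries and channel
/// metadata captured by the most recent pull.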
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Feed {
    // channel url
    pub link: Url,
    // channel data
    entries: Vec<Entry>,
    // channel meta
    title: String,
    html_link: String,
}

impl Feed {
    pub fn new(link: Url) -> Self {
        Self {
            link,
            entries: Vec::new(),
            title: String::new(),
            html_link: String::new(),
        }
    }

    pub fn total_count(&self) -> usize {
        self.entries.len()
    }

    /// The most recent publication date across all entries, falling back
    /// to chrono's minimum timestamp when the feed has no entries yet.
    pub fn last_updated(&self) -> DateTime<Utc> {
        self.entries
            .iter()
            .map(|e| e.published)
            .max()
            .unwrap_or(DateTime::<Utc>::MIN_UTC)
    }

    pub fn unread_count(&self) -> usize {
        self.entries.iter().filter(|e| e.unread).count()
    }

    fn update_title(&mut self, channel: &Channel) -> bool {
        if let Some(t) = channel.title.as_ref() {
            self.title = t.content.clone();
            return true;
        }
        false
    }

    fn update_html_link(&mut self, channel: &Channel) -> bool {
        // use the channel's first link as its html (website) link
        if let Some(l) = channel.links.first() {
            self.html_link = l.href.clone();
            return true;
        }
        false
    }

    pub fn entries(&self) -> &[Entry] {
        self.entries.as_slice()
    }
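
    /// Fetches the feed over http, refreshes the channel title and html
    /// link, and replaces the stored entries with the freshly parsed set,
    /// returning a summary of the pull.
    ///
    /// A minimal sketch of driving a pull from an async context, assuming
    /// a tokio (or similar) runtime; the url is a placeholder:
    ///
    /// ```ignore
    /// let mut feed = Feed::new(Url::parse("https://example.com/feed.xml").unwrap());
    /// let status = feed.pull().await?;
    /// // `status` carries the feed title, new-entry count, and entry errors
    /// ```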
    pub async fn pull(&mut self) -> Result<PullStatus, PullError> {
        let content = reqwest::get(self.link.clone()).await?.bytes().await?;
        let channel = parser::parse(&content[..])?;
        // update title
        if !self.update_title(&channel) {
            return Err(PullError::TitleUpdate);
        }
        // update html link
        if !self.update_html_link(&channel) {
            return Err(PullError::LinkUpdate);
        }
        // parse entries, separating successes from failures
        let (entries, errors): (Vec<_>, Vec<_>) = channel
            .entries
            .iter()
            .map(|e| Entry::try_from(e, &self.link))
            .partition(Result::is_ok);
        // summarise the pull: feed title, new-entry count, and entry errors
        let title = self.title.clone();
        let count = entries.len().saturating_sub(self.total_count());
        let errors = errors.into_iter().map(Result::unwrap_err).collect();
        let pull_status = PullStatus::new(title, count, errors);
        // replace the stored entries, newest first
        self.entries = entries.into_iter().map(Result::unwrap).collect();
        self.entries.sort_by(|a, b| b.published.cmp(&a.published));
        Ok(pull_status)
    }
}

impl fmt::Display for Feed {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{} {} {}",
            self.last_updated().format(crate::DATE_FMT),
            Style::new().dimmed().paint(self.title.to_ascii_lowercase()),
            Style::new()
                .fg(Color::Cyan)
                .paint(self.entries.len().to_string()),
        )
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Entry {
    pub title: String,
    pub link: Url,
    pub published: DateTime<Utc>,
    pub unread: bool,
}

impl Entry {
    fn try_from(e: &ChannelEntry, feed_url: &Url) -> Result<Self, EntryError> {
        let title = e
            .title
            .as_ref()
            .map(|t| t.content.clone())
            .ok_or(EntryError::MissingTitle)?;
        let raw_link = e
            .links
            .first()
            .map(|l| l.href.clone())
            .ok_or(EntryError::MissingLink)?;
        // accept absolute links; resolve relative ones against the feed url
        let link = Url::parse(&raw_link)
            .or_else(|_| feed_url.join(&raw_link))
            .map_err(|_| EntryError::InvalidLink)?;
        // fall back to the updated date when no published date is given
        let published = e
            .published
            .or(e.updated)
            .ok_or(EntryError::MissingPubDate)?;
        Ok(Self {
            title,
            link,
            published,
            unread: true,
        })
    }
}

impl fmt::Display for Entry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{} {} {}",
            self.published.format(crate::DATE_FMT),
            Style::new().dimmed().paint(self.title.to_ascii_lowercase()),
            Style::new().fg(Color::Cyan).paint(
                // strip the scheme, a leading "www.", and any trailing
                // slash to keep the printed link short
                self.link
                    .as_str()
                    .trim_end_matches('/')
                    .trim_start_matches("http://www.")
                    .trim_start_matches("https://www.")
                    .trim_start_matches("https://")
                    .trim_start_matches("http://")
            ),
        )
    }
}
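
// A minimal sanity-check sketch for the pure parts of this module; it
// exercises only what needs no network access, and relies on the crate's
// `DATE_FMT` constant via the Display impls.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_feed_is_empty() {
        let feed = Feed::new(Url::parse("https://example.com/feed.xml").unwrap());
        assert_eq!(feed.total_count(), 0);
        assert_eq!(feed.unread_count(), 0);
        // with no entries, last_updated falls back to the minimum timestamp
        assert_eq!(feed.last_updated(), DateTime::<Utc>::MIN_UTC);
    }

    #[test]
    fn entry_display_trims_scheme_and_www() {
        let entry = Entry {
            title: "Hello".to_string(),
            link: Url::parse("https://www.example.com/post/").unwrap(),
            published: Utc::now(),
            unread: true,
        };
        let rendered = entry.to_string();
        // the scheme, "www." prefix, and trailing slash are stripped
        assert!(rendered.contains("example.com/post"));
        assert!(!rendered.contains("https://www."));
    }
}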