"""Parsers that turn gemsub pages, twtxt feeds, and RSS/Atom XML into
FeedEntry and TwtxtEntry objects."""

import calendar
import re
import urllib.parse
from datetime import datetime

import feedparser

# Register the gemini:// scheme so urllib.parse.urljoin can resolve
# relative URLs against gemini base URLs.
urllib.parse.uses_relative.append("gemini")
urllib.parse.uses_netloc.append("gemini")

def _cw(text):
    # Normalize whitespace: map every whitespace character (tab, CR, LF)
    # in the text to a plain space.
    return re.sub(r'\s', ' ', text)

def parsegemsub(feed, baseurl):
    """Parse a gemsub (Gemini subscription) page into FeedEntry objects."""
    entries = []
    # Feed author: the page's first level-one heading.
    authorpattern = r'^#\s*([^#\r\n]+)'
    # Entries: "=> <link> <YYYY-MM-DD> <optional title>" link lines.
    entriespattern = r'^=>\s*(\S+)\s+(\d{4}-\d{2}-\d{2})[^\r\n\S]*([^\r\n]*)'
    entriespatternmatches = re.findall(entriespattern, feed, re.MULTILINE)
    authorpatternmatch = re.findall(authorpattern, feed, re.MULTILINE)
    if authorpatternmatch:
        author = authorpatternmatch[0]
    else:
        # A gemsub page must carry a level-one heading; bail out otherwise.
        return None
    for entrypatternmatch in entriespatternmatches:
        # Gemsub dates carry no time of day; pin them to noon so entries
        # sort stably among feeds that do provide full timestamps.
        try:
            updated = int(datetime.timestamp(datetime.strptime(entrypatternmatch[1] + " 12:00:00", "%Y-%m-%d %H:%M:%S")))
        except ValueError:
            continue
        link = urllib.parse.urljoin(baseurl, entrypatternmatch[0]).replace('/..', '').replace('/.', '')
        # Fall back to the date when an entry has no title text.
        title = entrypatternmatch[2] if entrypatternmatch[2] else entrypatternmatch[1]
        entries.append(FeedEntry(baseurl, author, updated, title, link))
    return entries
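
# For reference, the gemsub input parsed above is an ordinary gemtext page
# whose dated link lines each describe one entry, e.g. (hypothetical page):
#
#   # Alice's Gemlog
#   => first.gmi 2023-01-05 First post
#   => hello.gmi 2023-01-01
#
# The second line has no title text, so its date doubles as the title.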

def parsetwtxt(feed, baseurl):
    """Parse a twtxt feed into TwtxtEntry objects."""
    entries = []
    # Optional metadata comment: "# nick = <name>".
    authorpattern = r'^#\s*nick\s*=\s*(\S+)'
    # Entries: "<timestamp>\t<text>" lines.
    entriespattern = r'^(\S+)\t([^\r\n]+)'
    entriespatternmatches = re.findall(entriespattern, feed, re.MULTILINE)
    authorpatternmatch = re.findall(authorpattern, feed, re.MULTILINE)
    if authorpatternmatch:
        author = authorpatternmatch[0]
    else:
        # No nick declared; fall back to the feed URL as the author name.
        author = baseurl
    for entrypatternmatch in entriespatternmatches:
        # Skip entries whose timestamp is not RFC 3339 with a UTC offset.
        try:
            posted = int(datetime.timestamp(datetime.strptime(entrypatternmatch[0], "%Y-%m-%dT%H:%M:%S%z")))
        except ValueError:
            continue
        entries.append(TwtxtEntry(feedurl=baseurl, author=author, posted=posted, twt=entrypatternmatch[1]))
    return entries
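
# For reference, a twtxt feed is plain text with one post per line, an
# RFC 3339 timestamp and the post text separated by a tab, e.g.
# (hypothetical feed; "<TAB>" stands for a literal tab character):
#
#   # nick = alice
#   2023-01-05T10:00:00+00:00<TAB>Hello, twtxt!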

def parsexml(feed, baseurl):
    """Parse an RSS or Atom feed (via feedparser) into FeedEntry objects."""
    scheme = baseurl.split("://")[0]
    entries = []
    parsedfeed = feedparser.parse(feed)

    # Prefer an explicit feed-level author; fall back to the feed title.
    feedauthor = None
    if 'author_detail' in parsedfeed['feed'] and 'name' in parsedfeed['feed']['author_detail']:
        feedauthor = _cw(parsedfeed['feed']['author_detail']['name'])
    feedtitle = _cw(parsedfeed['feed']['title']) if 'title' in parsedfeed['feed'] else None
    if not feedauthor and feedtitle:
        feedauthor = feedtitle
    if 'entries' not in parsedfeed:
        return None
    for entry in parsedfeed['entries']:
        try:
            # A per-entry author wins; otherwise inherit the feed-level one.
            if 'author_detail' in entry and 'name' in entry['author_detail']:
                author = _cw(entry['author_detail']['name'])
            elif feedauthor:
                author = feedauthor
            else:
                continue
            # feedparser's updated_parsed is a UTC struct_time, so convert
            # with calendar.timegm (time.mktime would assume local time).
            updated = int(calendar.timegm(entry['updated_parsed']))
            title = _cw(entry['title'])
            if len(entry['links']) > 1:
                # Multiple links: keep the first whose scheme matches the feed's.
                link = [l for l in entry['links'] if l['href'].startswith(scheme)][0]['href']
            else:
                link = _cw(entry['link'])
            if not link:
                continue
            link = urllib.parse.urljoin(baseurl, link).replace('/..', '').replace('/.', '')
        except (KeyError, IndexError, TypeError, ValueError, OverflowError):
            # Skip entries missing required fields or with unparseable dates.
            continue
        entries.append(FeedEntry(baseurl, author, updated, title, link))
    return entries
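
# Note: feedparser.parse() accepts a string of feed text, as used above (it
# can also fetch URLs or read file objects, which parsexml does not rely on).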

class FeedEntry:
    """A single entry from a gemsub or RSS/Atom feed."""
    def __init__(self, feedurl, author, updated, title, link):
        self.feedurl = feedurl
        self.author = author
        self.updated = updated
        self.title = title
        self.link = link

class TwtxtEntry:
    """A single twt (microblog post) from a twtxt feed."""
    def __init__(self, feedurl, author, posted, twt):
        self.feedurl = feedurl
        self.author = author
        self.posted = posted
        self.twt = twt
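
# A minimal usage sketch, not part of the original module; the sample page,
# feed, and URLs below are hypothetical, and real callers would pass freshly
# fetched documents instead.
if __name__ == "__main__":
    gempage = "# Alice's Gemlog\n=> first.gmi 2023-01-05 First post\n"
    for e in parsegemsub(gempage, "gemini://example.org/") or []:
        print(e.author, e.updated, e.title, e.link)

    atom = (
        '<?xml version="1.0"?>'
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<title>Example Gemlog</title>'
        '<author><name>Carol</name></author>'
        '<entry><title>First post</title>'
        '<link href="gemini://example.org/first.gmi"/>'
        '<updated>2023-01-05T10:00:00Z</updated></entry>'
        '</feed>'
    )
    for e in parsexml(atom, "gemini://example.org/atom.xml") or []:
        print(e.author, e.updated, e.title, e.link)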