"""feedfinder: Find the Web feed for a Web page http://www.aaronsw.com/2002/feedfinder/ Usage: feed(uri) - returns feed found for a URI feeds(uri) - returns all feeds found for a URI >>> import feedfinder >>> feedfinder.feed('scripting.com') 'http://scripting.com/rss.xml' >>> >>> feedfinder.feeds('scripting.com') ['http://delong.typepad.com/sdj/atom.xml', 'http://delong.typepad.com/sdj/index.rdf', 'http://delong.typepad.com/sdj/rss.xml'] >>> Can also use from the command line. Feeds are returned one per line: $ python feedfinder.py diveintomark.org http://diveintomark.org/xml/atom.xml How it works: 0. At every step, feeds are minimally verified to make sure they are really feeds. 1. If the URI points to a feed, it is simply returned; otherwise the page is downloaded and the real fun begins. 2. Feeds pointed to by LINK tags in the header of the page (autodiscovery) 3. links to feeds on the same server ending in ".rss", ".rdf", ".xml", or ".atom" 4. links to feeds on the same server containing "rss", "rdf", "xml", or "atom" 5. links to feeds on external servers ending in ".rss", ".rdf", ".xml", or ".atom" 6. links to feeds on external servers containing "rss", "rdf", "xml", or "atom" 7. As a last ditch effort, we search Syndic8 for feeds matching the URI """ __version__ = "1.3" __date__ = "2006-04-02" __maintainer__ = "Aaron Swartz (me@aaronsw.com)" __author__ = "Mark Pilgrim (http://diveintomark.org)" __copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz" __license__ = "Python" __credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity Also Jason Diamond, Brian Lalor for bug reporting and patches""" _debug = 0 import sgmllib, urllib, urlparse, re, sys, robotparser, signal class TimeoutException(Exception): pass class TimeoutFunction: def __init__(self, function, timeout): self.timeout = timeout self.function = function def handle_timeout(self, signum, frame): raise TimeoutFunctionException() def __call__(self, *args, **kw): old = signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.timeout) try: result = self.function(*args, **kw) finally: signal.signal(signal.SIGALRM, old) signal.alarm(0) return result # XML-RPC support allows feedfinder to query Syndic8 for possible matches. 
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.2 and later ship xmlrpclib in the standard library; for older
# versions it can be downloaded separately.
try:
    import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
    xmlrpclib = None

# the dict() builtin first appeared in Python 2.2; provide a fallback that
# builds a dictionary from a list of (key, value) pairs on older versions
try:
    dict
except NameError:
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc

def _debuglog(message):
    if _debug: print message

class RobotFileParserFixed(robotparser.RobotFileParser):
    """patched version of RobotFileParser, integrating fixes from
    Python 2.3a2 and bug 690214"""

    def can_fetch(self, useragent, url):
        """using the parsed robots.txt decide if useragent can fetch url"""
        if self.disallow_all:
            return 0
        if self.allow_all:
            return 1
        # search for given user agent matches
        # the first match counts
        url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
        for entry in self.entries:
            if entry.applies_to(useragent):
                if not entry.allowance(url):
                    return 0
        # agent not found ==> access granted
        return 1

class URLGatekeeper:
    """a class to track robots.txt rules across multiple servers"""
    def __init__(self):
        self.rpcache = {} # a dictionary of RobotFileParserFixed objects, by domain
        self.urlopener = urllib.FancyURLopener()
        self.urlopener.version = "feedfinder/" + __version__ + " " + \
            self.urlopener.version + " +http://www.aaronsw.com/2002/feedfinder/"
        _debuglog(self.urlopener.version)
        self.urlopener.addheaders = [('User-agent', self.urlopener.version)]
        robotparser.URLopener.version = self.urlopener.version
        robotparser.URLopener.addheaders = self.urlopener.addheaders

    def _getrp(self, url):
        protocol, domain = urlparse.urlparse(url)[:2]
        if self.rpcache.has_key(domain):
            return self.rpcache[domain]
        baseurl = '%s://%s' % (protocol, domain)
        robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
        _debuglog('fetching %s' % robotsurl)
        rp = RobotFileParserFixed(robotsurl)
        rp.read()
        self.rpcache[domain] = rp
        return rp

    def can_fetch(self, url):
        rp = self._getrp(url)
        allow = rp.can_fetch(self.urlopener.version, url)
        _debuglog("Gatekeeper examined %s and said %s" % (url, allow))
        return allow

    def _get(self, url):
        if not self.can_fetch(url):
            return ''
        return self.urlopener.open(url).read()

    def get(self, *a, **kw):
        # cap every fetch at 10 seconds via TimeoutFunction
        return TimeoutFunction(self._get, 10)(*a, **kw)

_gatekeeper = URLGatekeeper()

class BaseParser(sgmllib.SGMLParser):
    def __init__(self, baseuri):
        sgmllib.SGMLParser.__init__(self)
        self.links = []
        self.baseuri = baseuri

    def normalize_attrs(self, attrs):
        # lowercase attribute names, resolve numeric character references,
        # and lowercase the values of rel/type (they are case-insensitive)
        attrs = [(k.lower(), sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v).strip()) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def do_base(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.baseuri = attrsD['href']

    def error(self, *a, **kw): pass # we're not picky

class LinkParser(BaseParser):
    FEED_TYPES = ('application/rss+xml',
                  'text/xml',
                  'application/atom+xml',
                  'application/x.atom+xml',
                  'application/x-atom+xml')

    def do_link(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('rel'): return
        rels = attrsD['rel'].split()
        if 'alternate' not in rels: return
        if attrsD.get('type') not in self.FEED_TYPES: return
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))

class ALinkParser(BaseParser):
    def start_a(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
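# Illustrative sketch (not part of the original module): LinkParser implements
# feed autodiscovery by collecting <link rel="alternate"> tags whose type is
# one of FEED_TYPES, resolving each href against the page's base URI. The
# markup below is made up for the example.
#
#   page = ('<html><head><link rel="alternate" '
#           'type="application/atom+xml" href="/xml/atom.xml"></head></html>')
#   parser = LinkParser('http://example.com/')
#   parser.feed(page)
#   parser.links  # -> ['http://example.com/xml/atom.xml']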
def makeFullURI(uri):
    if (not uri.startswith('http://')) and (not uri.startswith('https://')):
        uri = 'http://%s' % uri
    return uri

def getLinks(data, baseuri):
    p = LinkParser(baseuri)
    p.feed(data)
    return p.links

def getALinks(data, baseuri):
    p = ALinkParser(baseuri)
    p.feed(data)
    return p.links

def getLocalLinks(links, baseuri):
    baseuri = baseuri.lower()
    return [l for l in links if l.lower().startswith(baseuri)]

def isFeedLink(link):
    # check both 4- and 5-character suffixes so ".atom" can actually match
    link = link.lower()
    return link[-4:] in ('.rss', '.rdf', '.xml') or link[-5:] == '.atom'

def isXMLRelatedLink(link):
    link = link.lower()
    return link.count('rss') + link.count('rdf') + link.count('xml') + link.count('atom')

def couldBeFeedData(data):
    data = data.lower()
    if data.count('<html'): return 0
    return data.count('<rss') + data.count('<rdf') + data.count('<feed')

def isFeed(uri):
    _debuglog('seeing if %s is a feed' % uri)
    protocol = urlparse.urlparse(uri)
    if protocol[0] not in ('http', 'https'): return 0
    data = _gatekeeper.get(uri)
    return couldBeFeedData(data)

def sortFeeds(feed1Info, feed2Info):
    # sort Syndic8 results by popularity, most popular first
    return cmp(feed2Info['headlines_rank'], feed1Info['headlines_rank'])

def getFeedsFromSyndic8(uri):
    feeds = []
    try:
        server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
        feedids = server.syndic8.FindFeeds(uri)
        infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank', 'status', 'dataurl'])
        infolist.sort(sortFeeds)
        feeds = [f['dataurl'] for f in infolist if f['status'] == 'Syndicated']
        _debuglog('found %s feeds through Syndic8' % len(feeds))
    except:
        pass
    return feeds

def feeds(uri, querySyndic8=False):
    fulluri = makeFullURI(uri)
    try:
        data = _gatekeeper.get(fulluri)
    except:
        return []
    # is this already a feed?
    if couldBeFeedData(data):
        return [fulluri]
    # nope, it's a page; try LINK tags first (autodiscovery)
    _debuglog('looking for LINK tags')
    try:
        feeds = getLinks(data, fulluri)
    except:
        feeds = []
    _debuglog('found %s feeds through LINK tags' % len(feeds))
    feeds = filter(isFeed, feeds)
    if not feeds:
        # no LINK tags, look for regular <a> links that point to feeds
        _debuglog('no LINK tags, looking at A tags')
        try:
            links = getALinks(data, fulluri)
        except:
            links = []
        locallinks = getLocalLinks(links, fulluri)
        # look for obvious feed links on the same server
        feeds = filter(isFeed, filter(isFeedLink, locallinks))
        if not feeds:
            # look harder for feed links on the same server
            feeds = filter(isFeed, filter(isXMLRelatedLink, locallinks))
        if not feeds:
            # look for obvious feed links on another server
            feeds = filter(isFeed, filter(isFeedLink, links))
        if not feeds:
            # look harder for feed links on another server
            feeds = filter(isFeed, filter(isXMLRelatedLink, links))
    if not feeds and querySyndic8:
        # still no luck, search Syndic8 for feeds (requires xmlrpclib)
        _debuglog('still no luck, searching Syndic8')
        feeds = getFeedsFromSyndic8(uri)
    return feeds

getFeeds = feeds # backwards-compatibility

def feed(uri):
    #todo: give preference to certain feed formats
    feedlist = feeds(uri)
    if feedlist:
        return feedlist[0]
    else:
        return None

##### test harness ######

def test():
    uri = 'http://diveintomark.org/tests/client/autodiscovery/html4-001.html'
    failed = []
    count = 0
    while 1:
        data = _gatekeeper.get(uri)
        if data.find('Atom autodiscovery test') == -1: break
        sys.stdout.write('.')
        count += 1
        links = getLinks(data, uri)
        if not links:
            print '\n*** FAILED ***', uri, 'could not find link'
            failed.append(uri)
        elif len(links) > 1:
            print '\n*** FAILED ***', uri, 'found too many links'
            failed.append(uri)
        else:
            atomdata = urllib.urlopen(links[0]).read()
            if atomdata.find('<link rel="alternate"') == -1:
                print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
                failed.append(uri)
            else:
                backlink = atomdata.split('href="').pop().split('"')[0]
                if backlink != uri:
                    print '\n*** FAILED ***', uri, 'retrieved wrong feed'
                    failed.append(uri)
        if data.find('<link rel="next" href="') == -1: break
        uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
    print
    print count, 'tests executed,', len(failed), 'failed'

if __name__ == '__main__':
    args = sys.argv[1:]
    if args and args[0] == '--debug':
        _debug = 1
        args.pop(0)
    if args:
        uri = args[0]
    else:
        uri = 'http://diveintomark.org/'
    if uri == 'test':
        test()
    else:
        print "\n".join(getFeeds(uri))
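# Illustrative usage notes (not part of the original module). The Syndic8
# fallback from step 7 of the docstring is opt-in, and the command-line entry
# points above give a quick way to exercise the finder; 'example.com' is a
# placeholder host, and the file is assumed to be saved as feedfinder.py.
#
#   >>> import feedfinder
#   >>> feedfinder.feeds('example.com', querySyndic8=1)
#
#   $ python feedfinder.py --debug example.com   # trace each discovery step
#   $ python feedfinder.py test                  # run the autodiscovery suite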