User:ArchiverBot/Source.js

This is the source code of ArchiverBot. We are working on fixing it with the JavaScript validator at http://beautifytools.com/javascript-validator.php.

// Initialize the bot state on `self`.
// lang     - language code (e.g. 'en')
// patterns - per-language scraping configuration (regexp, fn_day, fn_title, ...)
// NOTE(review): the original fragment assigned to bare locals (selflang, selffrom)
// that were never read again, while later code reads self.frm / self.to /
// self.patterns — reconstructed here as properties on `self`. Confirm against
// the pywikibot original.
function init(self, lang, patterns) {
  self.lang = lang;
  self.frm = Site(lang, 'wikipedia'); // source wiki
  self.to = Site(lang, 'deleted');    // archive wiki
  self.patterns = patterns;
}

// Dispatch: when the patterns define a per-day log page name ('fn_day'),
// walk the recent daily logs; otherwise parse the single fixed list page.
function fetch(self) {
  if ('fn_day' in self.patterns) {
    fetch_days(self);
  } else {
    parse_list(self, self.patterns['title']);
  }
}

// Fetch the deletion-log pages for each of the last 7 days (today .. 6 days ago).
function fetch_days(self) {
  for (var i = 0; i < 7; i++) {
    fetch_day(self, i);
  }
}

// Fetch the articles listed on the deletion log page of a specific day.
// days_ago - how many days back from today (default 1 = yesterday).
function fetch_day(self, days_ago = 1) {
  // Python's datetime.date.today() - timedelta(days_ago), expressed with Date.
  var day = new Date();
  day.setDate(day.getDate() - days_ago);
  // NOTE(review): setlocale/LC_TIME come from the Python original and have no
  // JavaScript builtin — presumably the surrounding bot runtime provides a
  // shim so fn_day can format month names per-locale. Confirm.
  if ('locale' in self.patterns) {
    setlocale(LC_TIME, self.patterns['locale']);
  } else {
    setlocale(LC_TIME, 'en_US.utf8');
  }
  var pagename = self.patterns['fn_day'](day);
  // Python `cond and a or b` idiom replaced by an explicit conditional.
  var fn_title = ('fn_title' in self.patterns) ? self.patterns['fn_title'] : null;
  parse_list(self, pagename, fn_title);
}

// Parse a page that lists articles nominated for deletion and try to
// recover each listed article into the archive wiki.
// pagename      - title of the list page on the source wiki
// title_process - optional function applied to each matched title (may be null)
function parse_list(self, pagename, title_process) {
  var p = Page(self.frm, pagename);
  var s = p.get();
  var re_article = new RegExp(self.patterns['regexp']);
  var lines = s.split('\n');
  for (var i = 0; i < lines.length; i++) {
    var m = re_article.exec(lines[i]);
    if (m) {
      var title = m[1];
      // NOTE(review): the fragment contained `if (title != 'Info') Swedish;` —
      // apparently a Swedish-wiki special case to skip an 'Info' header row;
      // reconstructed as a guard. Confirm against the original bot.
      if (title === 'Info') {
        continue;
      }
      if (title_process) {
        title = title_process(title);
      }
      recover_article(self, title);
    }
  }
}

// Copy one deleted article from the source wiki to the archive wiki,
// skipping talk pages, redirects, already-deleted and already-rescued pages.
function recover_article(self, title) {
  // Truncate long titles in the log line (Python slice title[:100]).
  print('Recovering ' + title.substring(0, 100));
  if (title.indexOf('Talk') !== -1) {
    print('no talk pages yet');
    return;
  }
  var page = Page(self.frm, title);
  var article_text;
  try {
    article_text = page.get();
  } catch (e) {
    if (e instanceof IsRedirectPage) {
      print('IsRedirectPage? ' + title);
      return;
    }
    if (e instanceof NoPage) {
      print('PROBABLY deleted already... ' + title);
      return;
    }
    throw e; // unknown errors propagate
  }
  var dp_page = Page(self.to, title);
  var update_page = false;
  try {
    if (dp_page.get() !== article_text) {
      update_page = true;
    } else {
      print('PAGE already rescued');
    }
  } catch (e) {
    if (e instanceof NoPage) {
      update_page = true; // nothing archived yet
    } else {
      throw e;
    }
  }
  var msg = 'recovering from Wikipedia';
  // FIXME(review): the recovered fragment ends here without using update_page
  // or msg — presumably a `dp_page.put(article_text, msg)` followed when
  // update_page is true. Confirm against the original bot before enabling.
}

// ---------------------------------------------------------------------------
// Main driver: per-language scraping patterns, then run the bot for each.
// ---------------------------------------------------------------------------
var patterns = {};
patterns['en'] = {
  'test': 'Article for deletion',
  'regexp': '',
  // NOTE(review): d.strftime comes from the Python original; assumes the bot
  // runtime provides an strftime shim for Date objects — confirm.
  'fn_day': function (d) {
    return 'Wikipedia:Articles_for_deletion/Log/' + d.strftime('%Y_%B_%e');
  },
  'fn_title': function (t) {
    return t.replace(' (2nd nomination)', '');
  }
};
patterns['en2'] = {
  'regexp': ''
};
// The fragment repeated `var ad = Antidelete(lang, patterns[lang]); ad.fetch`
// ten times with no loop header — reconstructed as one loop over all languages.
for (var lang in patterns) {
  var ad = Antidelete(lang, patterns[lang]);
  ad.fetch();
}

// ---------------------------------------------------------------------------
// Family definition for the 'deleted' archive wiki (pywikibot family object).
// FIXME(review): this `init` collides with the bot's `init(self, lang,
// patterns)` defined earlier in the file; in JS the later declaration wins.
// The two should live on separate objects/classes — confirm intent.
// ---------------------------------------------------------------------------
function init(self) {
  family.Family.__init__(self);
  self.name = 'deleted';
  var langlist = ['en', 'en2'];
  // Python dict comprehension `{x: x for x in langlist}` expanded by hand.
  self.langs = {};
  for (var i = 0; i < langlist.length; i++) {
    self.langs[langlist[i]] = langlist[i];
  }
}

// Return the hostname of the archive wiki. The same host serves every
// language code (per-language paths are handled by scriptpath below).
// self - family object (unused); code - language code (unused).
function hostname(self, code) {
  return 'wikiarchive.miraheze.org';
}

// Return the wiki script path for a language code: 'en' lives at '/w',
// every other code at '/<code>w' (e.g. 'en2' -> '/en2w').
// self - family object (unused); code - language code.
function scriptpath(self, code) {
  if (code === 'en') {
    return '/w';
  }
  return '/' + code + 'w';
}