Skip to content
buscmd.py 12.9 KiB
Newer Older
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import click
import urllib
import json
import os
import glob
import csv
import datetime
import io
import xlwt
import sys
import yaml
import shutil
import hashlib
import zlib
from pyparsing import Word, nums, alphanums, alphas, oneOf, lineStart, lineEnd, Optional, restOfLine, Literal, ParseException, CaselessLiteral
from urllib import URLopener

from pymongo import Connection, DESCENDING, ASCENDING

XMLTOJSON = '/usr/local/bin/xml-to-json'
#FILEPATH = '/home/_data/preservation/ftpmirrors/bus/bus.gov.ru/GeneralInfo'
LOCALPATH = '/data/ftp/bus.gov.ru/'
NSI_PATH = os.path.join(LOCALPATH, 'all')

DATA_CATEGORY_GENERAL = 'GeneralInfo'
DATA_CATEGORY_ACTIONGRANT = 'ActionGrant'
DATA_CATEGORY_ACTIVERES = 'ActivityResult'
DATA_CATEGORY_BUDGETCIRC = 'BudgetaryCircumstances'
DATA_CATEGORY_DIVERSEINFO = 'DiverseInfo'
DATA_CATEGORY_FINACTIVITY = 'FinancialActivityPlan'
DATA_CATEGORY_INSACTIVITY = 'InspectionActivity'
DATA_CATEGORY_STATETASK = 'StateTask'
DATA_CATEGORY_REGPUBINFO = 'regPublishedInfo'
DATA_CATEGORY_ABF0503121 = 'annualBalanceF0503121'
DATA_CATEGORY_ABF0503127 = 'annualBalanceF0503127'
DATA_CATEGORY_ABF0503130 = 'annualBalanceF0503130'
DATA_CATEGORY_ABF0503137 = 'annualBalanceF0503137'
DATA_CATEGORY_ABF0503721 = 'annualBalanceF0503721'
DATA_CATEGORY_ABF0503730 = 'annualBalanceF0503730'
DATA_CATEGORY_ABF0503737 = 'annualBalanceF0503737'

DATA_NSI_BUDGETS = 'nsiBudget'
DATA_NSI_KLADR = 'nsiKladr'
DATA_NSI_KBK = 'nsiKbk'
DATA_NSI_KBKBUDGET = 'nsiKbkBudget'
DATA_NSI_OKATO = 'nsiOkato'
DATA_NSI_OKTMO = 'nsiOktmo'
DATA_NSI_OKER = 'nsiOker'
DATA_NSI_OKVED = 'nsiOkved'
DATA_NSI_OKFS = 'nsiOkfs'
DATA_NSI_OKOGU = 'nsiOkogu'
DATA_NSI_OKOPF = 'nsiOkopf'
DATA_NSI_SUBJECTSERVICE = 'nsiSubjectService'
DATA_NSI_PPO = 'nsiPpo'
DATA_NSI_OGS = 'nsiOgs'
DATA_NSI_FEDERALSERVICE = 'nsiFederalService'
DATA_NSI_INSTITUTIONTYPE = 'nsiInstitutionType'
DATA_NSI_IT = 'nsiIt'

NSI_LIST = [DATA_NSI_BUDGETS, DATA_NSI_KLADR, DATA_NSI_KBK, DATA_NSI_KBKBUDGET, DATA_NSI_OKATO, 
    DATA_NSI_OKTMO, DATA_NSI_OKER, DATA_NSI_OKVED, DATA_NSI_OKFS,
    DATA_NSI_OKOGU, DATA_NSI_OKOPF, DATA_NSI_SUBJECTSERVICE, DATA_NSI_PPO, DATA_NSI_OGS, DATA_NSI_FEDERALSERVICE,
    DATA_NSI_IT, DATA_NSI_INSTITUTIONTYPE
]

CAT_MAP = {
    DATA_CATEGORY_GENERAL : 'generalinfo',
    DATA_CATEGORY_ACTIONGRANT : 'actiongrant',
    DATA_CATEGORY_ACTIVERES : 'activeres',
    DATA_CATEGORY_BUDGETCIRC : 'budgetcirc',
    DATA_CATEGORY_DIVERSEINFO : 'diverseinfo',
    DATA_CATEGORY_FINACTIVITY : 'finactplan',
    DATA_CATEGORY_INSACTIVITY : 'insactivity',
    DATA_CATEGORY_STATETASK : 'statetask',
    DATA_CATEGORY_REGPUBINFO : 'regpubinfo',
    DATA_CATEGORY_ABF0503121 : 'afb0503121',
    DATA_CATEGORY_ABF0503127 : 'afb0503127',
    DATA_CATEGORY_ABF0503130 : 'afb0503130',
    DATA_CATEGORY_ABF0503137 : 'afb0503127',
    DATA_CATEGORY_ABF0503721 : 'afb0503721',
    DATA_CATEGORY_ABF0503730 : 'afb0503730',
    DATA_CATEGORY_ABF0503737 : 'afb0503737',
}

ARCHIVE_STATUS_UNPROCESSED = 1
ARCHIVE_STATUS_PROCESSING = 2
ARCHIVE_STATUS_PROCESSED = 3


TMPPATH = '/tmp/ram'

def calc_hash_crc(filename):
    """Calculate hash and crc32 of selected file"""
    data = open(filename, 'rb').read()
    fhash = hashlib.sha256(data).hexdigest()
    fcrc = zlib.crc32(data)
    return {'sha256': fhash, 'crc32' : fcrc}

class MongoLoader:
    def __init__(self, thepath):
        self.localpath = thepath
        self.conn = Connection()
        self.db = self.conn['bus']
        self.archcoll = self.db['archives']
        self.filecoll = self.db['files']
        pass

    def buildIndexes(self):
        self.archcoll.ensure_index([('obj_name', DESCENDING)])
        for name in CAT_MAP.values():
            self.db[name].ensure_index([('position.positionId', DESCENDING)])
        for name in NSI_LIST:
            self.db[name].ensure_index([('position.positionId', DESCENDING)])

    def stats(self):
        """Calc collections stats"""
        import tabulate
        keys = ['category', 'total_files', 'not_loaded', 'count', 'size']
        table = []
        print 'Collecting stats'
        cats = CAT_MAP.keys()
        cats.sort()
        total = ['total', 0, 0, 0, 0]
        for cat in cats:
            row = [cat]
            row.append(self.archcoll.find({'category' : cat}).count())
            row.append(self.archcoll.find({'category' : cat, 'status' : ARCHIVE_STATUS_UNPROCESSED}).count())
            stats = self.db.command("collstats", CAT_MAP[cat])
            row.append(stats['count'])
            row.append(float(stats['size']) / (1024*1024))
            table.append(row)

        NSI_LIST.sort()
        for cat in NSI_LIST:
            row = [cat]
            row.append(self.archcoll.find({'category' : cat}).count())
            row.append(self.archcoll.find({'category' : cat, 'status' : ARCHIVE_STATUS_UNPROCESSED}).count())
            stats = self.db.command("collstats", cat)
            row.append(stats['count'])
            row.append(float(stats['size']) / (1024*1024))
            table.append(row)
        
        for row in table:            
            total[1] += row[1]
            total[2] += row[2]
            total[3] += row[3]
            total[4] += row[4]
        table.append(total) 
        print tabulate.tabulate(table, headers=keys)


    def loadNSI(self):
        """Loads all NSI to DB"""
        for nsiname in NSI_LIST:
            print nsiname
#            if nsiname != DATA_NSI_KBKBUDGET: continue
            ffilter = NSI_PATH + '/' + nsiname + '_*.zip'
#            print ffilter
            files = glob.glob(ffilter)
            for pname in files:
                obj_name = pname.rsplit('/', 1)[1]
                obj = self.archcoll.find_one({'obj_name' : obj_name})
                if not obj:
                    obj = {'obj_name' : obj_name, 'filepath' : pname, 'category' : nsiname, 'filesize' : os.stat(pname)[6], 'status' : ARCHIVE_STATUS_UNPROCESSED}
                    obj.update(calc_hash_crc(pname))
                    self.archcoll.save(obj)
                    print '-', pname, ':', obj_name, 'saved'
                else:
                    print '-', pname, 'already collected'
            pass
        pass            


    def findArchives(self, category=DATA_CATEGORY_GENERAL, dbupdate=True):
        """Collect all archives by category"""
        print 'Collecting source data from', self.localpath
        pathname = os.path.join(self.localpath, category)
#        cat_dirs = os.listdir(self.localpath)
        for dirname, dirnames, filenames in os.walk(pathname):
            for filename in filenames:
                pname = os.path.join(dirname, filename)
                ext = pname.rsplit('.', 1)[-1].lower()
                if ext == 'zip':
                    region = pname.split('/')[-2]
                    obj_name = '/'.join(pname.split('/')[-3:])
                    obj = self.archcoll.find_one({'obj_name' : obj_name})
                    if not obj:
                        obj = {'obj_name' : obj_name, 'filepath' : pname, 'category' : category, 'region' : region, 'filesize' : os.stat(pname)[6], 'status' : ARCHIVE_STATUS_UNPROCESSED}
                        obj.update(calc_hash_crc(pname))
                        self.archcoll.save(obj)
                        print '-', pname, ':', obj_name, 'saved'
                    else:
                        print '-', pname, 'already collected'
#                        print obj

    def process_category(self, category, force=False, nocheck=False):
        """Process selected category"""
        all = []
        if not force:
            objects = self.archcoll.find({'category' : category, 'status' : {'$in' : [ARCHIVE_STATUS_UNPROCESSED, ARCHIVE_STATUS_PROCESSING]}})
            print 'Total unprocessed archives:', objects.count()
        else:
            objects = self.archcoll.find({'category' : category})
            print 'Total archives', objects.count()
        for o in objects:
            all.append(o)
        for o in all:
            self.process_archive(o, force=force, nocheck=nocheck)
        pass

    def process_archive(self, o, force=False, nocheck=False):
        if o['status'] == ARCHIVE_STATUS_UNPROCESSED:
            nocheck = True
        elif o['status'] == ARCHIVE_STATUS_PROCESSING:
            nocheck = False
        pname = o['filepath']
        name = pname.rsplit('/', 1)[-1]
        finndame = os.path.join(TMPPATH, name)
        try:
            os.mkdir(finndame)
        except:
            pass
        o['status'] = ARCHIVE_STATUS_PROCESSING
        tempname = os.path.join(finndame, name)
        shutil.copy(pname, tempname)
        cwd = os.getcwd()
        os.chdir(finndame)
        os.system('7za x -y %s 1> /dev/null 2>/dev/null' % (name))
        files = os.listdir(finndame)
        allfiles = []
        for fname in files:
            allfiles.append({'fname' : fname})
            self.process_file(o['category'], o['region'] if o.has_key('region') else None, name, os.path.abspath(fname), nocheck=nocheck, force=force)
        o['status'] = ARCHIVE_STATUS_PROCESSED
        o['files'] = allfiles
        o['nfiles'] = len(files)
        self.archcoll.save(o)
        os.chdir(cwd)
        shutil.rmtree(finndame)
        print pname, 'processed'
        pass

    def process_file(self, category, region, archivename, filename, nocheck=False, force=True):
        collname = self.map_category(category)
        coll = self.db[collname]
        name, ext = filename.rsplit('.', 1)
        if ext == 'xml':
            if region:
                id = '/'.join([category, region, archivename, filename.rsplit('/', 1)[-1]])
            else:
                id = '/'.join([category, archivename, filename.rsplit('/', 1)[-1]])
                nocheck = True 
            if nocheck:           
                data = None 
            else:
                data = coll.find_one({'processed.id' : id})
            found = data is not None
            if data and not force:
                return

            os.system(XMLTOJSON + ' -t ns2:position %s > %s' % (filename, name + '.json'))
            f = file(name + '.json', 'r')
            objects = []
            if region:
                try:
                    fdata = json.loads(f.read())
                    objects.append(fdata)
                except ValueError:
                    return
            else:
                for r in f:
                    fdata = json.loads(r)
                    objects.append(fdata)            
            if len(objects) == 1 and not nocheck:
                if data is None: data = objects[0]
                else: data.update(objects[0])
                proc = {'id': id, 'category' : category, 'region' : region, 'xml' : filename, 'archive': archivename}
                data['processed'] = proc
                coll.save(data)
            else:
                for o in objects:
                    proc = {'id': id, 'category' : category, 'region' : region, 'xml' : filename, 'archive': archivename}
                    o['processed'] = proc
                    coll.save(o)
            if found:
                print '-', name, 'updated'
            else:
                print '-', name, 'saved'
        pass

    def map_category(self, category):
        """Returns collection name on category"""
        v = CAT_MAP.get(category, None)
        if v is None:
            if category in NSI_LIST:
                return category
            else:
                return None
        else:
            return v

    def reset(self):
        """Drops databases"""
        print 'Reset database'
        self.archcoll.drop()
        print 'archive collection dropped'
        for name in CAT_MAP.values():
            print name, 'collection dropped'
            self.db[name].drop()


@click.group()
def cli1():
    pass

@cli1.command()
@click.option('--cat', default=DATA_CATEGORY_GENERAL)
def listdata(cat):
    """Lists all source files"""
    loader = MongoLoader(LOCALPATH)
    loader.buildIndexes()
    loader.findArchives(cat)
    pass

@click.group()
def cli2():
    pass

@cli2.command()
@click.option('--cat', default=DATA_CATEGORY_GENERAL)
@click.option('--force', default=False)
@click.option('--nocheck', default=False)
def process(cat, force, nocheck):
    """Process archive files"""
    force = bool(force)
    nocheck = bool(nocheck)
    loader = MongoLoader(LOCALPATH)
    #loader.findArchives(DATA_CATEGORY_GENERAL)
    loader.buildIndexes()
    loader.process_category(cat, nocheck=nocheck, force=force)
    pass


@click.group()
def cli3():
    pass

@cli3.command()
def reset():
    """Drop databases"""
    loader = MongoLoader(LOCALPATH)
    loader.reset()


@click.group()
def cli4():
    pass

@cli4.command()
def stats():
    """Statistics"""
    loader = MongoLoader(LOCALPATH)
    loader.buildIndexes()
    loader.stats()
    pass

@click.group()
def cli5():
    pass

@cli5.command()
def loadnsi():
    """Load all NSI data"""
    loader = MongoLoader(LOCALPATH)
    loader.loadNSI()
    pass


cli = click.CommandCollection(sources=[cli1, cli2, cli3, cli4, cli5])

if __name__ == '__main__':
    cli()