#!/usr/bin/python2.6
# coding: utf-8
"""Index the text of every page of every PDF below a directory with Whoosh.

Two indexes live in the "index" directory: "metadata" holds one document per
PDF file (title, path, creation time) and "book" holds one document per page,
linked back to its file through the metadata document number.
"""
import os
import sys
import time

import pyPdf
import whoosh.fields as fields
from whoosh.index import create_in, open_dir

schema_book = fields.Schema(
    pagenumber=fields.NUMERIC(stored=True),
    metadata_docnum=fields.NUMERIC(stored=True),
    content=fields.TEXT(stored=True),
)
schema_metadata = fields.Schema(
    title=fields.TEXT(stored=True),
    path=fields.ID(stored=True, unique=True),
    createtime=fields.NUMERIC(stored=True),
)

# Create both indexes on the first run, otherwise open the existing ones.
if not os.path.exists(u"index"):
    create_index = True
    os.mkdir(u"index")
    index_book = create_in(u"index", schema_book, u"book")
    index_metadata = create_in(u"index", schema_metadata, u"metadata")
else:
    create_index = False
    index_book = open_dir(u"index", u"book")
    index_metadata = open_dir(u"index", u"metadata")

filepaths = []
directory = unicode(sys.argv[1], "utf8")
searcher_book = index_book.searcher()
searcher_metadata = index_metadata.searcher()
print u"Walking {0}".format(directory)

# Collect the PDFs that still need indexing; files already present in the
# metadata index are skipped so incremental runs stay cheap.
filecount = 0
skipped = 0
for path, directories, files in os.walk(directory):
    for filename in files:
        if filename.endswith(u".pdf"):
            filepath = os.path.join(path, filename)
            if create_index or searcher_metadata.document_number(path=filepath) is None:
                filepaths.append(filepath)
                filecount += 1
            else:
                skipped += 1
            print u"\r{0} files found, {1} skipped".format(filecount + skipped, skipped),
print ""

if not create_index:
    # Purge index entries whose files have been deleted on disk.
    writer_book = index_book.writer()
    writer_metadata = index_metadata.writer()
    deleted = 0
    processed = 0
    for stored in searcher_metadata.all_stored_fields():
        path = stored['path']
        processed += 1
        if not os.path.exists(path):
            writer_book.delete_by_term(
                u'metadata_docnum', searcher_metadata.document_number(path=path))
            writer_metadata.delete_by_term('path', path)
            deleted += 1
        print u"\r{0} files processed, {1} deleted".format(processed, deleted),
    print ""
    writer_book.commit()
    writer_metadata.commit()

searcher_book.close()
searcher_metadata.close()


def process_file(filepath):
    """Add one PDF to the metadata index and each of its pages to the book index."""
    try:
        print u"{0} processing {1}".format(os.getpid(), filepath)
        inputfile = pyPdf.PdfFileReader(open(filepath, 'rb'))
        title = inputfile.getDocumentInfo().title
        writer_metadata = index_metadata.writer()
        writer_metadata.add_document(title=title, path=filepath, createtime=time.time())
        writer_metadata.commit()
        # Look the file back up to get the document number that links the
        # per-page entries to their metadata entry.
        searcher_metadata = index_metadata.searcher()
        metadata_docnum = searcher_metadata.document_number(path=filepath)
        searcher_metadata.close()
        pagenumber = 1
        for page in inputfile.pages:
            print u"{0} processing {1} page {2}".format(os.getpid(), filepath, pagenumber)
            content = page.extractText()
            # One commit per page, so partial progress survives an abort.
            writer_book = index_book.writer()
            writer_book.add_document(pagenumber=pagenumber,
                                     metadata_docnum=metadata_docnum,
                                     content=content)
            writer_book.commit()
            pagenumber += 1
    except KeyboardInterrupt:
        return 'KeyboardInterrupt'
    except Exception, e:
        print u"{0} failed at {1}: {2}".format(os.getpid(), filepath, e)


try:
    # Imported here so the ImportError fallback below can actually fire on
    # platforms without multiprocessing support.
    import multiprocessing as mp
    pool = mp.Pool()
    jobs = []
    for filepath in filepaths:
        jobs.append(pool.apply_async(process_file, (filepath,)))
    pool.close()
    pool.join()
except KeyboardInterrupt:
    pool.terminate()
except ImportError:
    # No multiprocessing available: process the files sequentially.
    for filepath in filepaths:
        if process_file(filepath) == "KeyboardInterrupt":
            break

print u"optimize indexes"
index_metadata.optimize()
index_metadata.close()
index_book.optimize()
index_book.close()
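
# A minimal sketch of querying the indexes built above, assuming Whoosh's
# standard QueryParser; the search term u"example" is a placeholder. Uncomment
# and run after the indexing pass has finished.
#
# from whoosh.index import open_dir
# from whoosh.qparser import QueryParser
#
# ix = open_dir(u"index", u"book")
# with ix.searcher() as searcher:
#     query = QueryParser("content", ix.schema).parse(u"example")
#     for hit in searcher.search(query):
#         print hit["pagenumber"], hit["metadata_docnum"]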