path: root/indexer.py
blob: 57e1c0fb06228143908f6e161b427675e64dd6a0
#!/usr/bin/python2.6
# coding: utf-8
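"""Index a directory tree of PDF files into two Whoosh indexes.

Usage: python indexer.py <directory>

Each PDF page becomes one document in the "book" index; per-file details
(title, path, indexing timestamp) go into the "metadata" index. On later
runs, already-indexed files are skipped and entries whose files have
disappeared from disk are removed. Page text is extracted in parallel
with a multiprocessing pool when that module is available.
"""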
import os
import sys
import pyPdf
from whoosh.index import create_in, open_dir
import whoosh.fields as fields
import time
try:
    import multiprocessing as mp
except ImportError:
    mp = None  # fall back to sequential processing at the bottom of the script

# One Whoosh index per concern: "book" holds one document per PDF page,
# "metadata" holds one document per PDF file.
schema_book = fields.Schema(
    pagenumber=fields.NUMERIC(stored=True),
    metadata_docnum=fields.NUMERIC(stored=True),
    content=fields.TEXT(stored=True),
    )

schema_metadata = fields.Schema(
    title=fields.TEXT(stored=True),
    path=fields.ID(stored=True, unique=True),
    createtime=fields.NUMERIC(stored=True),
    )

# Create both indexes on the first run; afterwards reopen the existing ones.
if not os.path.exists(u"index"):
    create_index = True
    os.mkdir(u"index")
    index_book = create_in(u"index", schema_book, u"book")
    index_metadata = create_in(u"index", schema_metadata, u"metadata")
else:
    create_index = False
    index_book = open_dir(u"index", u"book")
    index_metadata = open_dir(u"index", u"metadata")


# Collect the PDF files under the directory given on the command line,
# skipping anything that is already present in the metadata index.
if len(sys.argv) < 2:
    print u"usage: indexer.py <directory>"
    sys.exit(1)

filepaths = []
directory = unicode(sys.argv[1], "utf8")
searcher_book = index_book.searcher()
searcher_metadata = index_metadata.searcher()
print u"Walking {0}".format(directory)
filecount = 0
skipped = 0
for path, directories, files in os.walk(directory):
    for filename in files:
        if filename.endswith(u".pdf"):
            filepath = os.path.join(path, filename)
            already_indexed = (not create_index and
                               searcher_metadata.document_number(path=filepath) is not None)
            if already_indexed:
                skipped += 1
            else:
                filepaths.append(filepath)
                filecount += 1
            print u"\r{0} files found, {1} skipped".format(filecount + skipped, skipped),
print ""
print ""

if not create_index:  # prune index entries for files that no longer exist on disk
    writer_book = index_book.writer()
    writer_metadata = index_metadata.writer()
    deleted = 0
    processed = 0
    for stored in searcher_metadata.all_stored_fields():
        path = stored['path']
        processed += 1
        if not os.path.exists(path):
            writer_book.delete_by_term(u'metadata_docnum', searcher_metadata.document_number(path=path))
            writer_metadata.delete_by_term(u'path', path)
            deleted += 1
        print u"\r{0} files checked, {1} deleted".format(processed, deleted),
    print ""
    writer_book.commit()
    writer_metadata.commit()

searcher_book.close()
searcher_metadata.close()

def process_file(filepath):
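    """Extract the text of one PDF and add it to both Whoosh indexes."""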
    try:
        print u"{0} processing {1}".format(os.getpid(), filepath)
        # PDFs are binary files; open in 'rb' so pyPdf reads raw bytes.
        inputfile = pyPdf.PdfFileReader(open(filepath, 'rb'))
        title = inputfile.getDocumentInfo().title
        writer_metadata = index_metadata.writer()
        writer_metadata.add_document(title=title, path=filepath, createtime=time.time())
        writer_metadata.commit()
        # Look up the Whoosh document number of the metadata entry just added;
        # each page document references it via metadata_docnum.
        searcher_metadata = index_metadata.searcher()
        metadata_docnum = searcher_metadata.document_number(path=filepath)
        searcher_metadata.close()

        # Index each page as its own document so hits can point at a page number.
        for pagenumber, page in enumerate(inputfile.pages, 1):
            print u"{0} processing {1} Page {2}".format(os.getpid(), filepath, pagenumber)
            content = page.extractText()
            writer_book = index_book.writer()
            writer_book.add_document(pagenumber=pagenumber,
                                     metadata_docnum=metadata_docnum,
                                     content=content)
            writer_book.commit()
    except KeyboardInterrupt:
        return 'KeyboardInterrupt'
    except Exception as e:
        print u"{0} failed at {1}: {2}".format(os.getpid(), filepath, e)

if mp is not None:
    try:
        # Spread the PDF extraction over one worker process per CPU core.
        pool = mp.Pool()
        jobs = []
        for filepath in filepaths:
            jobs.append(pool.apply_async(process_file, (filepath,)))
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        pool.terminate()
else:
    # multiprocessing is not available: process the files sequentially.
    for filepath in filepaths:
        process_file(filepath)
print u"optimize indexes"
index_metadata.optimize()
index_metadata.close()
index_book.optimize()
index_book.close()
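
# A minimal sketch of how the two indexes built above could be queried
# together. This is illustrative only and not part of the indexer; the
# query string and variable names below are made up:
#
#   from whoosh.index import open_dir
#   from whoosh.qparser import QueryParser
#
#   ix_book = open_dir(u"index", u"book")
#   ix_meta = open_dir(u"index", u"metadata")
#   searcher_book = ix_book.searcher()
#   searcher_meta = ix_meta.searcher()
#   query = QueryParser(u"content", ix_book.schema).parse(u"some words")
#   for hit in searcher_book.search(query):
#       meta = searcher_meta.stored_fields(hit["metadata_docnum"])
#       print u"{0} page {1}".format(meta["path"], hit["pagenumber"])
#   searcher_book.close()
#   searcher_meta.close()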