Friday 27 April 2012

Reading Microsoft Word Doc Files in Python

Thought I'd post this although it's far from finished. I've been trying to pull text data from various formats using as few external libraries as I can.

The Word document format (Word 95 up to Word 2003) is a pretty complex format, especially when compared to the Microsoft Works WPS format (code to read that has been posted previously - that post also has the OleDocument class needed for the code below).

Basic code follows below, but I need to stress that this code is NOT 100% yet. The way I deal with text, both 16- and 8-bit, isn't great, and I'm sure there are more nasties that the format can add to the text, as well as in how it encodes tables and hyperlinks (there's a quick demo of the hyperlink markup after the listing). As I'm going to have to leave this code for a while before I improve it (other priorities), I thought I'd post it as-is in case I forget to do so later.

#!/usr/bin/env python
import sys
import re
import struct

from OleDocument import OleDocument, ReaderError

DOCNONASCIIPATTERN8 = re.compile(r"[\x7F-\xFF]")
DOCNONASCIIPATTERN16 = re.compile(ur"[\u007F-\uFFFF]")
DOCTABLECLEAN = re.compile(r"[\x01-\x08]")
DOCSTRIPPATTERN = re.compile(r"\r")
#Non-greedy matches so two hyperlinks on one line don't merge into one match
DOCHYPERLINKPATTERN = re.compile(
    r"\x13.*?HYPERLINK.*?\"(?P<uri>.*?)\".*?\x14(?P<display>.*?)\x15")

class DOCReader(object):
   
    def __init__(self, file_name):
        self.file_name = file_name
        self.document = OleDocument(file_name)
        self.non_ascii_pattern8 = DOCNONASCIIPATTERN8
        self.non_ascii_pattern16 = DOCNONASCIIPATTERN16
        self.table_cleanup = DOCTABLECLEAN
        self.strip_pattern = DOCSTRIPPATTERN
        self.hyperlink_pattern = DOCHYPERLINKPATTERN
        self.file_version = "Unknown Version"
               
    def extract_text(self):
        #First we need to pull out the WordDocument stream.
        #This has most of the data we need.
        doc_stream = self.document.read_stream("WordDocument")
        #The magic and version words define which version of document this is.
        #We don't handle pre-Word 6 documents.
        magic, version, flags = struct.unpack("<HH6xH", doc_stream[:12])
        if magic != 0xA5EC and magic != 0xA5DC:
            raise ReaderError("Invalid format - not a Word doc file")
        if version < 101:
            raise ReaderError("Very old doc file - can't handle before Word 95")
        elif version == 101 or version in range(103, 105):
            self.file_version = "Word 95"
            buff = self._process_word95(doc_stream)
        elif version >= 193:
            self.file_version = "Word 97 - 2003"
            buff = self._process_word97(doc_stream, flags)
        else:
            raise ReaderError("Unknown version of Word")
        return buff
   
    def _clean_hyperlinks(self, buff):
        #Word marks up hyperlinks with field characters (0x13, 0x14, 0x15).
        #We want to strip these out, pull out the hyperlink text and uri,
        # then add both back into the text.
        #A callable replacement stops re.sub treating backslashes in the uri
        # or display text as backreferences.
        return self.hyperlink_pattern.sub(
            lambda m: "%s (link: %s)" % (m.group("display"), m.group("uri")),
            buff)
   
    def _process_word95(self, doc_stream):
        #This version is so much easier to handle!
        #The text start and end offsets are early on in the stream.
        #Pull them out, try to clean up the text (seems to be ascii) and
        # that's it.
        text_start, text_end = struct.unpack_from("<II", doc_stream, 0x18)
        buff = doc_stream[text_start:text_end]
        buff = self.non_ascii_pattern8.sub("", buff)
        buff = self.table_cleanup.sub(" ", unicode(buff, "utf8"))
        buff = self._clean_hyperlinks(buff)
        return self.strip_pattern.sub("\r\n", buff)
       
    def _process_word97(self, doc_stream, flags):
        #This is where it gets ugly!
        #Depending on the flags, you need to pull out another stream.
        #It's almost always '1Table'.
        if flags & 0x40:
            table_stream_name = "1Table"
        else:
            table_stream_name = "0Table"
        #Now, from the WordDocument stream pull out the size of the text.
        #If there's any text in headers etc... then we need to add the extra
        # amount of text along with 1 extra char (don't know why the extra 1!!!)
        offset = 62
        count = struct.unpack_from("<H", doc_stream, offset)[0]
        offset += 2
        text_size, foot_size, header_size, macro_size, annotation_size, \
            endnote_size, textbox_size, headertextbox_size = \
            struct.unpack_from("<12x8I", doc_stream, offset) #'<' avoids native alignment
        #If any sizes other than text size are non zero, add them up and add 1
        if foot_size or header_size or macro_size or annotation_size or \
                endnote_size or textbox_size or headertextbox_size:
            final_cp = text_size + foot_size + header_size + macro_size + \
                annotation_size + endnote_size + textbox_size + \
                headertextbox_size + 1
        else:
            final_cp = text_size
        #Skip the rest of the FIB longs, the fc/lcb count word and the first
        # 33 fc/lcb pairs (66 longs) to reach fcClx/lcbClx: the offset and
        # size of the piece table (CLX) in the table stream
        offset += (count * 4)
        offset += (66 * 4) + 2
        clx_offset, clx_size = struct.unpack_from("<II", doc_stream, offset)
        table_stream = self.document.read_stream(table_stream_name)
        #The piece table starts with a 0x02 tag byte and a 4 byte size
        # (the original "<BH" only read half of the size we skip below)
        magic, size = struct.unpack_from("<BI", table_stream, clx_offset)
        if magic != 0x02:
            raise ReaderError("Not a valid clxt in the table stream")
        #Now read a list of cp offsets showing how the text is broken up
        cp_list = []
        offset = clx_offset + 5 #Skip the tag byte and the 4 byte size
        for i in range(size / 4):
            cp = struct.unpack_from("<I", table_stream, offset)[0]
            cp_list.append(cp)
            offset += 4
            if cp == final_cp:
                break
        else:
            raise ReaderError("Parse error - doc file has no final cp")
        #For each cp offset we need to see if the text is 8 or 16 bit, get a
        # stream offset and process the text chunk
        buff = u""
        for i in range(len(cp_list) - 1):
            #Each piece descriptor packs a 30 bit offset into the stream,
            # with bit 30 set when the text is 8-bit ("compressed")
            fc = struct.unpack_from("<2xI", table_stream, offset)[0]
            stream_offset = fc & (0xFFFFFFFF >> 2)
            compressed = fc & (0x01 << 30)
            next_cp = cp_list[i + 1]
            cp = cp_list[i]
            buff += self._process_block97(stream_offset, cp, next_cp, compressed,
                                            doc_stream)
            offset += 8
        return self.strip_pattern.sub("\r\n", buff)
           
    def _process_block97(self, text_offset, cp, next_cp, compressed,
                         doc_stream):
        #For each text block we need to read the data and try to clean it up.
        #The data has special markup for tables and hyperlinks as well as other
        # stuff that can be quite nasty if you don't clean it up
        if compressed:
            #8-bit text: the real stream offset is half the raw fc value
            text_offset /= 2
            last = text_offset + next_cp - cp - 1
            buff = self.non_ascii_pattern8.sub("", doc_stream[text_offset:last])
            buff = self.table_cleanup.sub(" ", unicode(buff, "utf8"))
            return self._clean_hyperlinks(buff)
        else:
            #16-bit text: two bytes per cp, little-endian with no BOM
            last = text_offset + 2 * (next_cp - cp)
            buff = doc_stream[text_offset:last]
            buff = unicode(buff, "utf_16_le", errors="replace")
            buff = self._clean_hyperlinks(buff)
            buff = self.non_ascii_pattern16.sub("", buff)
            return self.table_cleanup.sub(" ", buff)

if __name__ == '__main__':
    print DOCReader(sys.argv[1]).extract_text()
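To show what that hyperlink markup actually looks like, here's a quick standalone check of the cleanup regex on a made-up field string (0x13 opens the field, 0x14 separates the field instruction from the display text, 0x15 closes it):

import re

HYPERLINK = re.compile(
    r"\x13.*?HYPERLINK.*?\"(?P<uri>.*?)\".*?\x14(?P<display>.*?)\x15")
sample = "See \x13 HYPERLINK \"http://example.com\" \x14my site\x15 for more"
#Prints: See my site (link: http://example.com) for more
print HYPERLINK.sub(
    lambda m: "%s (link: %s)" % (m.group("display"), m.group("uri")), sample)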
   

Sunday 22 April 2012

WPS File Reader in Python (Round 2)

I've started work on pulling text from the Microsoft Word format (pre-Office 2007 doc files) using only Python. I had already completed a "passable" Microsoft Works (wps file) reader, but took a few liberties with the format and avoided reading the file properly via the OLE Compound File specification. A few hours messing with the doc file in my hex editor told me that I can't avoid doing things properly this time. So I wrote a rather simplified OleDocument class that simply parses the file and allows extraction of streams (it doesn't touch the mini streams and leaves out a few things I don't so far need).

So here's the updated wps code with the new OleDocument class. The WPSReader is a little simpler now and seems far more robust. It still needs testing though!

Now I need to start on the doc format. That's going to be far more difficult!

#!/usr/bin/env python
import os
import sys
import struct
import re
from collections import namedtuple

WPSSTRIPPATTERN = re.compile(r"\r")

class ReaderError(Exception): pass

class OleDocument(object):
   
    def __init__(self, file_name):
        self.file_name = file_name
        self.sectors = []
        self.directories = {}
        self._parse_contents()
       
    def _read_fat_sector(self, fat_sector, fd):
        fd.seek(self.sector_size*(fat_sector+1), os.SEEK_SET)
        for i in range(self.sector_size / 4):
            sector = struct.unpack("<I", fd.read(4))[0]
            yield sector               
       
    def _parse_contents(self):
        with open(self.file_name, "rb") as fd:
            sig = fd.read(8)
            if sig != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
                raise ReaderError("Not a valid Ole Storage Document")
            header = fd.read(68)
            sector_shift, mini_sector_shift, fat_sector_count, \
                first_dir_sector, first_mini_sector, mini_sector_count,  \
                = struct.unpack("<22xHH10xII8xII8x", header)
            self.sector_size = 1 << sector_shift
            self.mini_sector_size = 1 << mini_sector_shift
            fat_sectors = []
           
            for i in range(fat_sector_count):
                fat_sectors.append(struct.unpack("<I", fd.read(4))[0])
            for fat_sector in fat_sectors:
                for sector in self._read_fat_sector(fat_sector, fd):
                    self.sectors.append(sector)
               
            #Now read the directories
            buff = ''
            for count, dir_sector in \
                    enumerate(self._get_sectors(first_dir_sector)):
                fd.seek(self.sector_size*dir_sector+self.sector_size,
                        os.SEEK_SET)
                buff += fd.read(self.sector_size)
            #Directory entries are 128 bytes, so sector_size / 128 per sector
            for i in range((count+1) * (self.sector_size / 128)):
                name, sector, size = struct.unpack("<64s52xII4x",
                                                   buff[i*128:(i+1)*128])
                #Entry names are UTF-16LE, padded out with nulls
                name = re.sub("\x00", "", unicode(name, "utf_16_le"))
                self.directories[name] = (sector, size)
               
    def _get_sectors(self, sector):
        while True:
            if sector == 0xFFFFFFFE: #End of chain marker
                break
            yield sector
            sector = self.sectors[sector]
                       
                               
    def read_stream(self, name):
        name = unicode(name)
        if name not in self.directories:
            raise ReaderError("No stream called %s" % name)
        start, size = self.directories[name]
        buff = ""
        with open(self.file_name, "rb") as fd:
            for sector in self._get_sectors(start):
                fd.seek(self.sector_size*sector+self.sector_size, os.SEEK_SET)
                buff += fd.read(self.sector_size)
                if len(buff) >= size:
                    break
        return buff[:size] #Trim the sector padding off the end

class WPSReader(object):
   
    def __init__(self, file_name):
        self.document = OleDocument(file_name)
        self.strip_pattern = WPSSTRIPPATTERN

    def _process_entries(self, entry_buff):
        magic, local, next_offset = struct.unpack("<HHI", entry_buff[:8])
        if magic != 0x01F8:
            raise ReaderError("Invalid format - Entry magic tag incorrect")
        entry_pos = 0x08 #2 WORDS & 1 DWORD
        for i in range(local):
            size = struct.unpack("<H", entry_buff[entry_pos:entry_pos+0x2])[0]
            name, offset, entry_size = struct.unpack("<2x4s10xII",
                                        entry_buff[entry_pos:entry_pos+size])
            if name == "TEXT": #Success!
                return (local, 0x00, offset, entry_size)
            entry_pos += size
        return (local, next_offset, 0x00, 0x00) #Needs to be run again
       
    def extract_text(self):
        buff = self.document.read_stream("CONTENTS")
        total_entries = struct.unpack("<12xH", buff[:14])[0]
        entries_pos = 24
        while True:
            entries, next_offset, text_header_offset, text_size = \
                self._process_entries(buff[entries_pos:])          
            if text_size: #TEXT found
                break
            total_entries -= entries
            if total_entries and next_offset:
                entries_pos = next_offset #Move to next block
            else:
                raise ReaderError("Unable to find TEXT secion. File corrupt?")
        text = buff[text_header_offset:text_header_offset+text_size]
        return self.strip_pattern.sub("\r\n", unicode(text, "utf_16_le"))
       
       
if __name__ == '__main__':
    reader = WPSReader(sys.argv[1])
    print reader.extract_text()
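The OleDocument class is useful on its own for poking around any OLE container, not just wps files. Assuming the code above is saved as OleDocument.py (the module name the doc reader imports), something like this lists the streams and pulls one out - the file name here is just an example:

from OleDocument import OleDocument

doc = OleDocument("example.wps")
#directories maps each stream name to a (start sector, size) pair
for name, (sector, size) in doc.directories.items():
    print "%s: sector %d, %d bytes" % (name, sector, size)
#Grab a whole stream as a byte string
contents = doc.read_stream("CONTENTS")
print "CONTENTS is %d bytes" % len(contents)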
   

Wednesday 18 April 2012

Reading Microsoft Works WPS files in Python

* The code below has been updated - for a better wps reader go here *

I'm trying to extract text from various file formats for a search engine and trying to avoid any external packages. I have 10 thousand files to extract text from and a lot of them are WPS files (Microsoft Works - you know, that free office suite that comes preinstalled on many Windows boxes).

I was opening the files and using regular expressions and some text sanitizing to try to get decent text from the file. Unfortunately, the text is split into blocks, so I got some part-words as well as font names and other junk words from the metadata of the files. I had used libwps in the past but didn't want the dependency in my code. Most Windows-based document formats from that era use some sort of OLE-stream content that is often kind of difficult to get your head around when you're staring at the bytes in a hex editor. After a little reading of the libwps code, a few calculations and jumps in Hex Workshop and a few guesses, I managed to work out some code to pull text from this format. It's a work in progress and it needs some extensive testing (and it'll get it when I'm using it!) but looks good so far:

import re
import struct
import sys

WPSMAGICPATTERN = re.compile(r"(CHNKWKS|CHNKINK)")
WPSSTRIPPATTERN = re.compile(r"\r")

class ReaderError(Exception): pass

class WPSReader(object):
    TEXT_BLOCK = 0x0E00
   
    def __init__(self, file_name):
        self.file_name = file_name
        self.magic_pattern = WPSMAGICPATTERN
        self.strip_pattern = WPSSTRIPPATTERN
       
    def _process_entries(self, entry_buff):
        magic, local, next_offset = struct.unpack("<HHI", entry_buff[:8])
        if magic != 0x01F8:
            raise ReaderError("Invalid format - Entry magic tag incorrect")
        entry_pos = 0x08 #2 WORDS & 1 DWORD
        for i in range(local):
            size = struct.unpack("<H", entry_buff[entry_pos:entry_pos+0x2])[0]
            name, offset, entry_size = struct.unpack("<2x4s10xII",
                                        entry_buff[entry_pos:entry_pos+size])
            if name == "TEXT": #Success!
                return (local, 0x00, offset, entry_size)
            entry_pos += size
        return (local, next_offset, 0x00, 0x00) #Needs to be run again
       
    def extract_text(self):
        with open(self.file_name, "rb") as fd:
            buff = fd.read()
        matches = self.magic_pattern.search(buff)
        if not matches:
            raise ReaderError("No 'Magic' block: not a valid WPS file")
        if matches.groups()[0] == "CHNKINK":
            raise ReaderError("Unable to convert a WPS file prior to version 8")
        headers_start = matches.start()
        entries_pos = headers_start + 24   
        total_entries = struct.unpack("<12xH",  buff[headers_start:headers_start+14])[0]
        while True:        
            entries, next_offset, text_header_offset, text_size = \
                    self._process_entries(buff[entries_pos:])          
            if text_size: #TEXT found
                break
            total_entries -= entries
            if total_entries and next_offset:
                entries_pos = next_offset + self.TEXT_BLOCK #Move to next block
            else:
                raise ReaderError("Unable to find TEXT secion. File corrupt?")
        text_offset = text_header_offset + headers_start #Move to start of text
        block_size = min(self.TEXT_BLOCK, text_size)
        text = buff[text_offset:text_offset+block_size]
        text_size -= block_size
        block_size = min(self.TEXT_BLOCK, text_size)
        if text_size:
            text_offset = 0x800 #Seems to always be the location of the second block
            text += buff[text_offset:text_offset+block_size]
            text_size -= block_size
        if text_size:
            text_offset = text_header_offset + headers_start + self.TEXT_BLOCK
            text += buff[text_offset:text_offset+text_size]
        return self.strip_pattern.sub("\r\n", unicode(text, "utf_16_le"))

if __name__ == '__main__':
    print WPSReader(sys.argv[1]).extract_text()
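As an aside, most of the magic offsets above came from jumping around files in Hex Workshop. A throwaway dump helper like this does much the same job at the Python prompt (it's separate from the reader, and the file name is just an example):

def hexdump(buff, offset=0, length=128, width=16):
    #Print a classic offset / hex bytes / printable-ascii dump of a buffer
    end = min(offset + length, len(buff))
    for pos in range(offset, end, width):
        chunk = buff[pos:min(pos + width, end)]
        hexed = " ".join("%02X" % ord(c) for c in chunk)
        plain = "".join(c if 32 <= ord(c) < 127 else "." for c in chunk)
        print "%08X  %-*s %s" % (pos, width * 3, hexed, plain)

hexdump(open("some.wps", "rb").read(), offset=0x800)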

The greatest number of files are old-style Word 95-2003 (doc) files. Now I need to try to do the same with those!

Friday 13 April 2012

ShelobPy: Python File Spider

Added some code to Bitbucket today - https://bitbucket.org/swbsystems/shelobpy

I need a way to pull text out of various document files (Word - both new and old, Open Office, Rich Text, MS Works, PDF, HTML... etc). I had worked on a C project to do this, but the libraries I needed were a pain to install and get working. It had to run on Linux as it's going on a web server and I didn't want to add the Open Office runtime to do the conversions.

So far I have used pyPDF to pull out PDF text, Beautiful Soup to make a little sense of some terrible Word-HTML mark-up and the rest has been done with a little brute force and regular expressions!
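For reference, the glue for those two is roughly this shape - a sketch rather than the actual ShelobPy code, with made-up file names, assuming pyPdf and the BeautifulSoup 3 API:

from pyPdf import PdfFileReader
from BeautifulSoup import BeautifulSoup

#PDF: concatenate the extracted text of every page
pdf = PdfFileReader(open("some.pdf", "rb"))
pdf_text = u"".join(pdf.getPage(i).extractText()
                    for i in range(pdf.getNumPages()))

#Word-HTML: keep the text nodes, throw away the mark-up
soup = BeautifulSoup(open("some.html").read())
html_text = u" ".join(soup.findAll(text=True))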

It is still __VERY__ hackish, but does use some neat stuff like Natural Language Processing to pull out some decent search terms. Still lots of work to go on it though!
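The search-term side boils down to something like this sketch - using NLTK here, which isn't necessarily what ShelobPy does, and it needs NLTK's punkt and stopwords data downloaded first:

from collections import Counter

import nltk
from nltk.corpus import stopwords

def search_terms(text, count=10):
    #Tokenize, keep alphabetic words, drop stop words, rank by frequency
    words = [w.lower() for w in nltk.word_tokenize(text) if w.isalpha()]
    stops = set(stopwords.words("english"))
    return [word for word, freq
            in Counter(w for w in words if w not in stops).most_common(count)]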