Created
November 26, 2024 02:48
-
-
Save tshrinivasan/12a044b8e861e120d7a7820d8dee9cd5 to your computer and use it in GitHub Desktop.
Fix for Tamil font issues in Reportlab ( python2)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf-8
#Copyright ReportLab Europe Ltd. 2000-2009
#see license.txt for license details
# author - Selvam <[email protected]>
# Fix for Tamil font issues in Reportlab ( python2)
# search for "selvam" for the changes he made in this file
__version__ = '$Id: ttfonts.py 3608 2009-12-04 16:12:34Z rgbecker $'
__doc__="""TrueType font support

This defines classes to represent TrueType fonts.  They know how to calculate
their own width and how to write themselves into PDF files.  They support
subsetting and embedding and can represent all 16-bit Unicode characters.

Note on dynamic fonts
---------------------

Usually a Font in ReportLab corresponds to a fixed set of PDF objects (Font,
FontDescriptor, Encoding).  But with dynamic font subsetting a single TTFont
will result in a number of Font/FontDescriptor/Encoding object sets, and the
contents of those will depend on the actual characters used for printing.

To support dynamic font subsetting a concept of "dynamic font" was introduced.
Dynamic Fonts have a _dynamicFont attribute set to 1.

Dynamic fonts have the following additional functions::

    def splitString(self, text, doc):
        '''Splits text into a number of chunks, each of which belongs to a
        single subset.  Returns a list of tuples (subset, string).  Use
        subset numbers with getSubsetInternalName.  Doc is used to identify
        a document so that different documents may have different dynamically
        constructed subsets.'''

    def getSubsetInternalName(self, subset, doc):
        '''Returns the name of a PDF Font object corresponding to a given
        subset of this dynamic font.  Use this function instead of
        PDFDocument.getInternalFontName.'''

You must never call PDFDocument.getInternalFontName for dynamic fonts.

If you have a traditional static font, mapping to PDF text output operators
is simple::

    '%s 14 Tf (%s) Tj' % (getInternalFontName(psfontname), text)

If you have a dynamic font, use this instead::

    for subset, chunk in font.splitString(text, doc):
        '%s 14 Tf (%s) Tj' % (font.getSubsetInternalName(subset, doc), chunk)

(Tf is a font setting operator and Tj is a text output operator.  You should
also escape invalid characters in the Tj argument, see TextObject._formatText.
Oh, and that 14 up there is the font size.)

Canvas and TextObject have special support for dynamic fonts.
"""
import string | |
from struct import pack, unpack, error as structError | |
from reportlab.lib.utils import getStringIO | |
from reportlab.pdfbase import pdfmetrics, pdfdoc | |
class TTFError(pdfdoc.PDFError):
    """Raised for any problem encountered while parsing, validating or
    embedding a TrueType font."""
    pass
def SUBSETN(n,table=string.maketrans('0123456789','ABCDEFGHIJ')):
    """Return the six-letter tag for subset number *n* (digit 0 -> 'A',
    ... digit 9 -> 'J'); e.g. 0 -> 'AAAAAA', 12 -> 'AAAABC'."""
    # Zero-pad the subset number to six digits, then letter-encode each digit.
    digits = '%6.6d' % n
    return digits.translate(table)
#
# Helpers
#
from codecs import utf_8_encode, utf_8_decode, latin_1_decode
# Decode a byte string with the given codec and return the list of code
# points (ordinals) of the decoded characters.
# NB: Python 2 map() returns a list, so callers get a real list here.
parse_utf8=lambda x, decode=utf_8_decode: map(ord,decode(x)[0])
parse_latin1 = lambda x, decode=latin_1_decode: map(ord,decode(x)[0])
def latin1_to_utf8(text):
    """Re-encode a Latin-1 byte string as UTF-8 (helper to convert when
    needed from latin input)."""
    decoded, _consumed = latin_1_decode(text)
    encoded, _length = utf_8_encode(decoded)
    return encoded
def makeToUnicodeCMap(fontname, subset):
    """Creates a ToUnicode CMap for a given subset.  See Adobe
    _PDF_Reference (ISBN 0-201-75839-3) for more information.

    fontname is used for the CMap's Registry/Ordering/CMapName entries;
    subset is a sequence of Unicode code points, indexed by subset byte
    code (so entry i maps byte <i> to code point subset[i]).
    Returns the CMap as a newline-joined string.
    """
    cmap = [
        "/CIDInit /ProcSet findresource begin",
        "12 dict begin",
        "begincmap",
        "/CIDSystemInfo",
        "<< /Registry (%s)" % fontname,
        "/Ordering (%s)" % fontname,
        "/Supplement 0",
        ">> def",
        "/CMapName /%s def" % fontname,
        "/CMapType 2 def",
        "1 begincodespacerange",
        "<00> <%02X>" % (len(subset) - 1),      # codes run from 0 to len-1
        "endcodespacerange",
        "%d beginbfchar" % len(subset)
        ] + ["<%02X> <%04X>" % (i,v) for i,v in enumerate(subset)] + [
        "endbfchar",
        "endcmap",
        "CMapName currentdict /CMap defineresource pop",
        "end",
        "end"
        ]
    # str.join instead of the Python-2-only string.join function; the file
    # already uses the method form (''.join) elsewhere.
    return "\n".join(cmap)
def splice(stream, offset, value):
    """Splices the given value into stream at the given offset and
    returns the resulting stream (the original is unchanged)"""
    head = stream[:offset]
    tail = stream[offset + len(value):]
    return head + value + tail
def _set_ushort(stream, offset, value):
    """Writes the given unsigned short value into stream at the given
    offset and returns the resulting stream (the original is unchanged)"""
    # big-endian 16-bit pack, overlaid in place (splice() inlined)
    packed = pack(">H", value)
    return stream[:offset] + packed + stream[offset + len(packed):]
# Use the C accelerator module for the checksum helpers when available,
# falling back to pure-Python implementations otherwise.
try:
    import _rl_accel
except ImportError:
    try:
        from reportlab.lib import _rl_accel
    except ImportError:
        _rl_accel = None
try:
    # AttributeError covers both _rl_accel being None and an older
    # accelerator that lacks the symbol (the original bare except hid
    # unrelated errors as well).
    hex32 = _rl_accel.hex32
except AttributeError:
    def hex32(i):
        "Format i as '0X' + 8 upper-case hex digits, masked to 32 bits"
        return '0X%8.8X' % (i & 0xFFFFFFFF)
try:
    add32 = _rl_accel.add32L
    calcChecksum = _rl_accel.calcChecksumL
except AttributeError:
    def add32(x, y):
        "Calculate (x + y) modulo 2**32"
        return (x + y) & 0xFFFFFFFF
    def calcChecksum(data):
        """Calculates TTF-style checksums: sum of the data read as
        big-endian 32-bit words, modulo 2**32 (zero-padded to a
        multiple of 4 bytes)"""
        if len(data)&3: data = data + (4-(len(data)&3))*"\0"
        return sum(unpack(">%dl" % (len(data)>>2), data)) & 0xFFFFFFFF
del _rl_accel
#
# TrueType font handling
#

# Flags for component records of composite glyphs in the 'glyf' table
# (see the TrueType/OpenType specification, composite glyph description).
GF_ARG_1_AND_2_ARE_WORDS = 1 << 0       # component args are 16-bit, not bytes
GF_ARGS_ARE_XY_VALUES = 1 << 1          # args are x/y offsets, not point numbers
GF_ROUND_XY_TO_GRID = 1 << 2
GF_WE_HAVE_A_SCALE = 1 << 3             # a single scale value follows
GF_RESERVED = 1 << 4
GF_MORE_COMPONENTS = 1 << 5             # another component record follows
GF_WE_HAVE_AN_X_AND_Y_SCALE = 1 << 6    # separate x and y scales follow
GF_WE_HAVE_A_TWO_BY_TWO = 1 << 7        # a full 2x2 transform follows
GF_WE_HAVE_INSTRUCTIONS = 1 << 8
GF_USE_MY_METRICS = 1 << 9
GF_OVERLAP_COMPOUND = 1 << 10
GF_SCALED_COMPONENT_OFFSET = 1 << 11
GF_UNSCALED_COMPONENT_OFFSET = 1 << 12
def TTFOpenFile(fn):
    '''Opens a TTF file possibly after searching TTFSearchPath
    returns (filename,file)
    '''
    from reportlab.lib.utils import rl_isfile, open_for_read
    try:
        return fn, open_for_read(fn, 'rb')
    except IOError:
        import os
        # A relative name may still be found on the configured search path.
        if not os.path.isabs(fn):
            from reportlab import rl_config
            for directory in rl_config.TTFSearchPath:
                candidate = os.path.join(directory, fn)
                if rl_isfile(candidate):
                    return candidate, open_for_read(candidate, 'rb')
        raise TTFError('Can\'t open file "%s"' % fn)
class TTFontParser:
    "Basic TTF file parser"
    # sfnt version tags we accept: 1.0, 'true' (Apple), 'ttcf' (collection)
    ttfVersions = (0x00010000,0x74727565,0x74746366)
    # TTC header versions we accept
    ttcVersions = (0x00010000,0x00020000)
    fileKind='TTF'

    def __init__(self, file, validate=0, subfontIndex=0):
        """Loads and parses a TrueType font file.

        file can be a filename or a file object.  If validate is set to a
        false value, checksum validation is skipped.  This can save time,
        especially if the font is large.  subfontIndex selects a subfont
        when the file is a TrueType collection (TTC).
        """
        self.validate = validate
        self.readFile(file)
        isCollection = self.readHeader()
        if isCollection:
            self.readTTCHeader()
            self.getSubfont(subfontIndex)
        else:
            if self.validate: self.checksumFile()
            self.readTableDirectory()
            self.subfontNameX = ''

    def readTTCHeader(self):
        """Read the TrueType-collection header and record the offsets of the
        contained subfonts."""
        self.ttcVersion = self.read_ulong()
        self.fileKind = 'TTC'
        # a subfont inside a TTC may not itself be a 'ttcf' stream
        self.ttfVersions = self.ttfVersions[:-1]
        if self.ttcVersion not in self.ttcVersions:
            raise TTFError('"%s" is not a %s file: can\'t read version 0x%8.8x' %(self.filename,self.fileKind,self.ttcVersion))
        self.numSubfonts = self.read_ulong()
        self.subfontOffsets = []
        a = self.subfontOffsets.append
        # range() behaves identically to xrange() here and also works on Python 3
        for i in range(self.numSubfonts):
            a(self.read_ulong())

    def getSubfont(self,subfontIndex):
        """Select subfont number subfontIndex of a TTC file and parse its
        header and table directory."""
        if self.fileKind!='TTC':
            # the original format string had one %s but two arguments and
            # raised TypeError instead of the intended TTFError
            raise TTFError('"%s" is not a TTC file: use this method' % self.filename)
        try:
            pos = self.subfontOffsets[subfontIndex]
        except IndexError:
            raise TTFError('TTC file "%s": bad subfontIndex %s not in [0,%d]' % (self.filename,subfontIndex,self.numSubfonts-1))
        self.seek(pos)
        self.readHeader()
        self.readTableDirectory()
        self.subfontNameX = '-'+str(subfontIndex)

    def readTableDirectory(self):
        """Read the sfnt table directory at the current position into
        self.tables (a list of records) and self.table (tag -> record)."""
        try:
            self.numTables = self.read_ushort()
            self.searchRange = self.read_ushort()
            self.entrySelector = self.read_ushort()
            self.rangeShift = self.read_ushort()
            # Read table directory
            self.table = {}
            self.tables = []
            for n in range(self.numTables):
                record = {}
                record['tag'] = self.read_tag()
                record['checksum'] = self.read_ulong()
                record['offset'] = self.read_ulong()
                record['length'] = self.read_ulong()
                self.tables.append(record)
                self.table[record['tag']] = record
        except Exception:
            raise TTFError('Corrupt %s file "%s" cannot read Table Directory' % (self.fileKind, self.filename))
        if self.validate: self.checksumTables()

    def readHeader(self):
        '''read the sfnt header at the current position; returns true when
        the stream is a TrueType collection ('ttcf')'''
        try:
            self.version = version = self.read_ulong()
        except Exception:
            raise TTFError('"%s" is not a %s file: can\'t read version' %(self.filename,self.fileKind))
        if version==0x4F54544F:  # 'OTTO': OpenType with CFF outlines
            raise TTFError('%s file "%s": postscript outlines are not supported'%(self.fileKind,self.filename))
        if version not in self.ttfVersions:
            raise TTFError('Not a TrueType font: version=0x%8.8X' % version)
        return version==self.ttfVersions[-1]

    def readFile(self,f):
        """Slurp the whole font into self._ttf_data; f may be a filename
        or any object with a read() method."""
        if hasattr(f,'read'):
            self.filename = '(ttf)'
        else:
            self.filename, f = TTFOpenFile(f)
        self._ttf_data = f.read()
        self._pos = 0

    def checksumTables(self):
        "Check the checksums of all tables; raise TTFError on mismatch."
        for t in self.tables:
            table = self.get_chunk(t['offset'], t['length'])
            checksum = calcChecksum(table)
            if t['tag'] == 'head':
                # 'head' is checksummed with checkSumAdjustment (bytes 8-11)
                # treated as zero
                adjustment = unpack('>l', table[8:8+4])[0]
                checksum = add32(checksum, -adjustment)
            xchecksum = t['checksum']
            if xchecksum != checksum:
                raise TTFError('TTF file "%s": invalid checksum %s table: %s (expected %s)' % (self.filename,hex32(checksum),t['tag'],hex32(xchecksum)))

    def checksumFile(self):
        "Check the checksum of the whole file (must come out to 0xB1B0AFBA)."
        checksum = calcChecksum(self._ttf_data)
        if 0xB1B0AFBA!=checksum:
            raise TTFError('TTF file "%s": invalid checksum %s (expected 0xB1B0AFBA) len: %d &3: %d' % (self.filename,hex32(checksum),len(self._ttf_data),(len(self._ttf_data)&3)))

    def get_table_pos(self, tag):
        "Returns the offset and size of a given TTF table."
        offset = self.table[tag]['offset']
        length = self.table[tag]['length']
        return (offset, length)

    def seek(self, pos):
        "Moves read pointer to a given offset in file."
        self._pos = pos

    def skip(self, delta):
        "Skip the given number of bytes."
        self._pos = self._pos + delta

    def seek_table(self, tag, offset_in_table = 0):
        """Moves read pointer to the given offset within a given table and
        returns absolute offset of that position in the file."""
        self._pos = self.get_table_pos(tag)[0] + offset_in_table
        return self._pos

    def read_tag(self):
        "Read a 4-character table tag"
        self._pos += 4
        return self._ttf_data[self._pos - 4:self._pos]

    def read_ushort(self):
        "Reads an unsigned short (big-endian 16-bit)"
        self._pos += 2
        return unpack('>H',self._ttf_data[self._pos-2:self._pos])[0]

    def read_ulong(self):
        "Reads an unsigned long (big-endian 32-bit)"
        self._pos += 4
        return unpack('>L',self._ttf_data[self._pos - 4:self._pos])[0]

    def read_short(self):
        "Reads a signed short (big-endian 16-bit)"
        self._pos += 2
        try:
            return unpack('>h',self._ttf_data[self._pos-2:self._pos])[0]
        except structError as error:
            raise TTFError(error)

    def get_ushort(self, pos):
        "Return an unsigned short at given position"
        return unpack('>H',self._ttf_data[pos:pos+2])[0]

    def get_ulong(self, pos):
        "Return an unsigned long at given position"
        return unpack('>L',self._ttf_data[pos:pos+4])[0]

    def get_chunk(self, pos, length):
        "Return a chunk of raw data at given position"
        return self._ttf_data[pos:pos+length]

    def get_table(self, tag):
        "Return the raw contents of the given TTF table"
        pos, length = self.get_table_pos(tag)
        return self._ttf_data[pos:pos+length]
class TTFontMaker:
    "Basic TTF file generator"

    def __init__(self):
        "Initializes the generator."
        # tag (4-char table name) -> raw table data
        self.tables = {}

    def add(self, tag, data):
        "Adds a table to the TTF file."
        if tag == 'head':
            # zero out checkSumAdjustment (bytes 8-11); it is recomputed
            # over the whole file in makeStream()
            data = splice(data, 8, '\0\0\0\0')
        self.tables[tag] = data

    def makeStream(self):
        "Finishes the generation and returns the TTF file as a string"
        stm = getStringIO()
        write = stm.write

        numTables = len(self.tables)
        # searchRange/entrySelector/rangeShift are the binary-search helper
        # fields required in the sfnt table directory header
        searchRange = 1
        entrySelector = 0
        while searchRange * 2 <= numTables:
            searchRange = searchRange * 2
            entrySelector = entrySelector + 1
        searchRange = searchRange * 16
        rangeShift = numTables * 16 - searchRange

        # Header
        write(pack(">lHHHH", 0x00010000, numTables, searchRange,
                             entrySelector, rangeShift))

        # Table directory
        tables = self.tables.items()
        tables.sort()     # XXX is this the correct order?
        offset = 12 + numTables * 16
        for tag, data in tables:
            if tag == 'head':
                # remember where 'head' lands so the file checksum can be
                # patched into its checkSumAdjustment field below.
                # NOTE(review): if no 'head' table was ever add()ed,
                # head_start stays unbound and stm.seek below raises NameError
                head_start = offset
            checksum = calcChecksum(data)
            write(tag)
            write(pack(">LLL", checksum, offset, len(data)))
            # each table is stored 4-byte aligned
            paddedLength = (len(data)+3)&~3
            offset = offset + paddedLength

        # Table data (padded with NULs to a 4-byte boundary)
        for tag, data in tables:
            data += "\0\0\0"
            write(data[:len(data)&~3])

        # whole-file checksum: checkSumAdjustment = 0xB1B0AFBA - sum(file)
        checksum = calcChecksum(stm.getvalue())
        checksum = add32(0xB1B0AFBAL, -checksum)
        stm.seek(head_start + 8)
        write(pack('>L', checksum))

        return stm.getvalue()
class TTFontFile(TTFontParser):
    "TTF file parser and generator"

    def __init__(self, file, charInfo=1, validate=0,subfontIndex=0):
        """Loads and parses a TrueType font file.

        file can be a filename or a file object.  If validate is set to a
        false value, checksum validation is skipped.  This can save time,
        especially if the font is large.  See TTFontFile.extractInfo for
        more information.
        """
        TTFontParser.__init__(self, file, validate=validate,subfontIndex=subfontIndex)
        self.extractInfo(charInfo)

    def extractInfo(self, charInfo=1):
        """
        Extract typographic information from the loaded font file.

        The following attributes will be set::

            name         PostScript font name
            flags        Font flags
            ascent       Typographic ascender in 1/1000ths of a point
            descent      Typographic descender in 1/1000ths of a point
            capHeight    Cap height in 1/1000ths of a point (0 if not available)
            bbox         Glyph bounding box [l,t,r,b] in 1/1000ths of a point
            _bbox        Glyph bounding box [l,t,r,b] in unitsPerEm
            unitsPerEm   Glyph units per em
            italicAngle  Italic angle in degrees ccw
            stemV        stem weight in 1/1000ths of a point (approximate)

        If charInfo is true, the following will also be set::

            defaultWidth   default glyph width in 1/1000ths of a point
            charWidths     dictionary of character widths for every supported
                           UCS character code

        This will only work if the font has a Unicode cmap (platform 3,
        encoding 1, format 4 or platform 0 any encoding format 4).  Setting
        charInfo to false avoids this requirement.
        """
        # name - Naming table
        name_offset = self.seek_table("name")
        format = self.read_ushort()
        if format != 0:
            raise TTFError("Unknown name table format (%d)" % format)
        numRecords = self.read_ushort()
        string_data_offset = name_offset + self.read_ushort()
        # name IDs we care about: 1=family, 2=style, 3=unique ID,
        # 4=full name, 6=PostScript name
        names = {1:None,2:None,3:None,4:None,6:None}
        K = names.keys()
        nameCount = len(names)
        for i in xrange(numRecords):
            platformId = self.read_ushort()
            encodingId = self.read_ushort()
            languageId = self.read_ushort()
            nameId = self.read_ushort()
            length = self.read_ushort()
            offset = self.read_ushort()
            if nameId not in K: continue
            N = None
            if platformId == 3 and encodingId == 1 and languageId == 0x409: # Microsoft, Unicode, US English, PS Name
                opos = self._pos
                try:
                    self.seek(string_data_offset + offset)
                    if length % 2 != 0:
                        raise TTFError("PostScript name is UTF-16BE string of odd length")
                    length /= 2
                    N = []
                    A = N.append
                    while length > 0:
                        char = self.read_ushort()
                        A(chr(char))
                        length -= 1
                    N = ''.join(N)
                finally:
                    self._pos = opos
            elif platformId == 1 and encodingId == 0 and languageId == 0: # Macintosh, Roman, English, PS Name
                # According to the OpenType spec, if a PS name exists, it must
                # exist both in MS Unicode and Macintosh Roman formats.
                # Apparently, you can find live TTF fonts which only have the
                # Macintosh format.
                N = self.get_chunk(string_data_offset + offset, length)
            if N and names[nameId] is None:
                names[nameId] = N
                nameCount -= 1
                if nameCount==0: break
        if names[6] is not None:
            psName = names[6].replace(" ", "-")  #Dinu Gherman's fix for font names with spaces
        elif names[4] is not None:
            psName = names[4].replace(" ", "-")
        elif names[1] is not None:
            # Fine, one last try before we bail.
            psName = names[1].replace(" ", "-")
        else:
            psName = None
        # Don't just assume, check for None since some shoddy fonts cause crashes here...
        if not psName:
            raise TTFError("Could not find PostScript font name")
        for c in psName:
            oc = ord(c)
            if oc>126 or c in ' [](){}<>/%':
                raise TTFError("psName=%r contains invalid character '%s' ie U+%04X" % (psName,c,ord(c)))
        self.name = psName
        self.familyName = names[1] or psName
        self.styleName = names[2] or 'Regular'
        self.fullName = names[4] or psName
        self.uniqueFontID = names[3] or psName

        # head - Font header table
        self.seek_table("head")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError('Unknown head table version %d.%04x' % (ver_maj, ver_min))
        self.fontRevision = self.read_ushort(), self.read_ushort()
        self.skip(4)
        magic = self.read_ulong()
        if magic != 0x5F0F3CF5:
            raise TTFError('Invalid head table magic %04x' % magic)
        self.skip(2)
        self.unitsPerEm = unitsPerEm = self.read_ushort()
        # convert font units to 1/1000ths of a point
        scale = lambda x, unitsPerEm=unitsPerEm: x * 1000. / unitsPerEm
        self.skip(16)
        xMin = self.read_short()
        yMin = self.read_short()
        xMax = self.read_short()
        yMax = self.read_short()
        self.bbox = map(scale, [xMin, yMin, xMax, yMax])
        self.skip(3*2)
        indexToLocFormat = self.read_ushort()
        glyphDataFormat = self.read_ushort()

        # OS/2 - OS/2 and Windows metrics table
        # (needs data from head table)
        if self.table.has_key("OS/2"):
            self.seek_table("OS/2")
            version = self.read_ushort()
            self.skip(2)
            usWeightClass = self.read_ushort()
            self.skip(2)
            fsType = self.read_ushort()
            if fsType == 0x0002 or (fsType & 0x0300) != 0:
                raise TTFError('Font does not allow subsetting/embedding (%04X)' % fsType)
            self.skip(58)   #11*2 + 10 + 4*4 + 4 + 3*2
            sTypoAscender = self.read_short()
            sTypoDescender = self.read_short()
            self.ascent = scale(sTypoAscender)  # XXX: for some reason it needs to be multiplied by 1.24--1.28
            self.descent = scale(sTypoDescender)
            if version > 1:
                self.skip(16)   #3*2 + 2*4 + 2
                sCapHeight = self.read_short()
                self.capHeight = scale(sCapHeight)
            else:
                self.capHeight = self.ascent
        else:
            # Microsoft TTFs require an OS/2 table; Apple ones do not.  Try to
            # cope.  The data is not very important anyway.
            usWeightClass = 500
            self.ascent = scale(yMax)
            self.descent = scale(yMin)
            self.capHeight = self.ascent

        # There's no way to get stemV from a TTF file short of analyzing
        # actual outline data.  This fuzzy formula is taken from pdflib
        # sources, but we could just use 0 here.
        self.stemV = 50 + int((usWeightClass / 65.0) ** 2)

        # post - PostScript table
        # (needs data from OS/2 table)
        self.seek_table("post")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj not in (1, 2, 3, 4):
            # Adobe/MS documents 1, 2, 2.5, 3; Apple also has 4.
            # From the Apple docs it seems that we do not need to care
            # about the exact version, so if you get this error, you can
            # try to remove this check altogether.
            raise TTFError('Unknown post table version %d.%04x' % (ver_maj, ver_min))
        # italicAngle is a 16.16 fixed-point number
        self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
        self.underlinePosition = self.read_short()
        self.underlineThickness = self.read_short()
        isFixedPitch = self.read_ulong()

        self.flags = FF_SYMBOLIC    # All fonts that contain characters
                                    # outside the original Adobe character
                                    # set are considered "symbolic".
        if self.italicAngle!= 0:
            self.flags = self.flags | FF_ITALIC
        if usWeightClass >= 600:    # FW_REGULAR == 500, FW_SEMIBOLD == 600
            self.flags = self.flags | FF_FORCEBOLD
        if isFixedPitch:
            self.flags = self.flags | FF_FIXED
        # XXX: FF_SERIF?  FF_SCRIPT?  FF_ALLCAP?  FF_SMALLCAP?

        # hhea - Horizontal header table
        self.seek_table("hhea")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError('Unknown hhea table version %d.%04x' % (ver_maj, ver_min))
        self.skip(28)
        metricDataFormat = self.read_ushort()
        if metricDataFormat != 0:
            raise TTFError('Unknown horizontal metric data format (%d)' % metricDataFormat)
        numberOfHMetrics = self.read_ushort()
        if numberOfHMetrics == 0:
            raise TTFError('Number of horizontal metrics is 0')

        # maxp - Maximum profile table
        self.seek_table("maxp")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError('Unknown maxp table version %d.%04x' % (ver_maj, ver_min))
        numGlyphs = self.read_ushort()

        if not charInfo:
            self.charToGlyph = None
            self.defaultWidth = None
            self.charWidths = None
            return

        if glyphDataFormat != 0:
            raise TTFError('Unknown glyph data format (%d)' % glyphDataFormat)

        # cmap - Character to glyph index mapping table
        cmap_offset = self.seek_table("cmap")
        self.skip(2)
        cmapTableCount = self.read_ushort()
        unicode_cmap_offset = None
        for n in xrange(cmapTableCount):
            platformID = self.read_ushort()
            encodingID = self.read_ushort()
            offset = self.read_ulong()
            if platformID == 3 and encodingID == 1: # Microsoft, Unicode
                format = self.get_ushort(cmap_offset + offset)
                if format == 4:
                    unicode_cmap_offset = cmap_offset + offset
                    break
            elif platformID == 0: # Unicode -- assume all encodings are compatible
                format = self.get_ushort(cmap_offset + offset)
                if format == 4:
                    unicode_cmap_offset = cmap_offset + offset
                    break
        if unicode_cmap_offset is None:
            raise TTFError('Font does not have cmap for Unicode (platform 3, encoding 1, format 4 or platform 0 any encoding format 4)')
        self.seek(unicode_cmap_offset + 2)
        length = self.read_ushort()
        limit = unicode_cmap_offset + length
        self.skip(2)
        segCount = self.read_ushort() / 2
        self.skip(6)
        endCount = map(lambda x, self=self: self.read_ushort(), xrange(segCount))
        if self.name == 'Lohit-Tamil':
            # selvam: extend the last cmap segment so that the private-use
            # codes 65537..65648 used for Tamil shaping fall inside it
            endCount[-1]= 65648
        self.skip(2)
        startCount = map(lambda x, self=self: self.read_ushort(), xrange(segCount))
        idDelta = map(lambda x, self=self: self.read_short(), xrange(segCount))
        idRangeOffset_start = self._pos
        idRangeOffset = map(lambda x, self=self: self.read_ushort(), xrange(segCount))

        # Now it gets tricky.
        glyphToChar = {}
        charToGlyph = {}
        for n in xrange(segCount):
            for unichar in xrange(startCount[n], endCount[n] + 1):
                if idRangeOffset[n] == 0:
                    glyph = (unichar + idDelta[n]) & 0xFFFF
                    if unichar>=65537 and unichar<=65648:
                        # selvam: hard-coded glyph mapping for the Tamil
                        # private-use range (code 65537 maps to glyph 134)
                        glyph = unichar-65537+134
                else:
                    offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
                    offset = idRangeOffset_start + 2 * n + offset
                    if offset >= limit:
                        # workaround for broken fonts (like Thryomanes)
                        glyph = 0
                    else:
                        glyph = self.get_ushort(offset)
                        if glyph != 0:
                            glyph = (glyph + idDelta[n]) & 0xFFFF
                charToGlyph[unichar] = glyph
                if glyphToChar.has_key(glyph):
                    glyphToChar[glyph].append(unichar)
                else:
                    glyphToChar[glyph] = [unichar]
        self.charToGlyph = charToGlyph

        # hmtx - Horizontal metrics table
        # (needs data from hhea, maxp, and cmap tables)
        self.seek_table("hmtx")
        aw = None
        self.charWidths = {}
        self.hmetrics = []
        for glyph in xrange(numberOfHMetrics):
            # advance width and left side bearing.  lsb is actually a signed
            # short, but we don't need it anyway (except for subsetting)
            aw, lsb = self.read_ushort(), self.read_ushort()
            self.hmetrics.append((aw, lsb))
            aw = scale(aw)
            if glyph == 0:
                self.defaultWidth = aw
            if glyphToChar.has_key(glyph):
                for char in glyphToChar[glyph]:
                    self.charWidths[char] = aw
        for glyph in xrange(numberOfHMetrics, numGlyphs):
            # the rest of the table only lists advance left side bearings,
            # so we reuse aw set by the last iteration of the previous loop
            lsb = self.read_ushort()
            self.hmetrics.append((aw, lsb))
            if glyphToChar.has_key(glyph):
                for char in glyphToChar[glyph]:
                    self.charWidths[char] = aw

        # loca - Index to location
        self.seek_table('loca')
        self.glyphPos = []
        if indexToLocFormat == 0:
            # short format: offsets are stored halved
            for n in xrange(numGlyphs + 1):
                self.glyphPos.append(self.read_ushort() << 1)
        elif indexToLocFormat == 1:
            for n in xrange(numGlyphs + 1):
                self.glyphPos.append(self.read_ulong())
        else:
            raise TTFError('Unknown location table format (%d)' % indexToLocFormat)

    # Subsetting

    def makeSubset(self, subset):
        """Create a subset of a TrueType font containing only the glyphs for
        the Unicode code points in *subset*; returns the new font file as a
        string (built via TTFontMaker)."""
        output = TTFontMaker()

        # Build a mapping of glyphs in the subset to glyph numbers in
        # the original font.  Also build a mapping of UCS codes to
        # glyph values in the new font.
        # Start with 0 -> 0: "missing character"
        glyphMap = [0]      # new glyph index -> old glyph index
        glyphSet = {0:0}    # old glyph index -> new glyph index
        codeToGlyph = {}    # unicode -> new glyph index
        for code in subset:
            if self.charToGlyph.has_key(code):
                originalGlyphIdx = self.charToGlyph[code]
            else:
                originalGlyphIdx = 0
            if not glyphSet.has_key(originalGlyphIdx):
                glyphSet[originalGlyphIdx] = len(glyphMap)
                glyphMap.append(originalGlyphIdx)
            codeToGlyph[code] = glyphSet[originalGlyphIdx]

        # Also include glyphs that are parts of composite glyphs
        start = self.get_table_pos('glyf')[0]
        n = 0
        while n < len(glyphMap):
            originalGlyphIdx = glyphMap[n]
            glyphPos = self.glyphPos[originalGlyphIdx]
            glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
            n += 1
            if not glyphLen: continue
            self.seek(start + glyphPos)
            numberOfContours = self.read_short()
            if numberOfContours < 0:
                # composite glyph: walk its component records and pull the
                # referenced glyphs into the subset as well
                self.skip(8)
                flags = GF_MORE_COMPONENTS
                while flags & GF_MORE_COMPONENTS:
                    flags = self.read_ushort()
                    glyphIdx = self.read_ushort()
                    if not glyphSet.has_key(glyphIdx):
                        glyphSet[glyphIdx] = len(glyphMap)
                        glyphMap.append(glyphIdx)
                    if flags & GF_ARG_1_AND_2_ARE_WORDS:
                        self.skip(4)
                    else:
                        self.skip(2)
                    if flags & GF_WE_HAVE_A_SCALE:
                        self.skip(2)
                    elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
                        self.skip(4)
                    elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
                        self.skip(8)

        numGlyphs = n = len(glyphMap)
        # trim trailing hmetrics entries that repeat the same advance width
        # NOTE(review): indexes self.hmetrics by the NEW glyph count; can
        # raise IndexError if the subset uses every original glyph --
        # behavior kept as in the original code, confirm against upstream
        while n > 1 and self.hmetrics[n][0] == self.hmetrics[n - 1][0]:
            n -= 1
        numberOfHMetrics = n

        # The following tables are simply copied from the original
        for tag in ('name', 'OS/2', 'cvt ', 'fpgm', 'prep'):
            try:
                output.add(tag, self.get_table(tag))
            except KeyError:
                # Apparently some of the tables are optional (cvt, fpgm, prep).
                # The lack of the required ones (name, OS/2) would have already
                # been caught before.
                pass

        # post - PostScript (version 3.0, no glyph names)
        post = "\x00\x03\x00\x00" + self.get_table('post')[4:16] + "\x00" * 16
        output.add('post', post)

        # hhea - Horizontal Header
        hhea = self.get_table('hhea')
        hhea = _set_ushort(hhea, 34, numberOfHMetrics)
        output.add('hhea', hhea)

        # maxp - Maximum Profile
        maxp = self.get_table('maxp')
        maxp = _set_ushort(maxp, 4, numGlyphs)
        output.add('maxp', maxp)

        # cmap - Character to glyph mapping
        # XXX maybe use format 0 if possible, not 6?
        entryCount = len(subset)
        length = 10 + entryCount * 2
        cmap = [0, 1,           # version, number of tables
                1, 0, 0,12,     # platform, encoding, offset (hi,lo)
                6, length, 0,   # format, length, language
                0,
                entryCount] + \
               map(codeToGlyph.get, subset)
        cmap = apply(pack, [">%dH" % len(cmap)] + cmap)
        output.add('cmap', cmap)

        # hmtx - Horizontal Metrics
        hmtx = []
        for n in xrange(numGlyphs):
            originalGlyphIdx = glyphMap[n]
            aw, lsb = self.hmetrics[originalGlyphIdx]
            if n < numberOfHMetrics:
                hmtx.append(int(aw))
            hmtx.append(int(lsb))
        hmtx = apply(pack, [">%dH" % len(hmtx)] + hmtx)
        output.add('hmtx', hmtx)

        # glyf - Glyph data
        glyphData = self.get_table('glyf')
        offsets = []
        glyf = []
        pos = 0
        for n in xrange(numGlyphs):
            offsets.append(pos)
            originalGlyphIdx = glyphMap[n]
            glyphPos = self.glyphPos[originalGlyphIdx]
            glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
            data = glyphData[glyphPos:glyphPos+glyphLen]
            # Fix references in composite glyphs
            if glyphLen > 2 and unpack(">h", data[:2])[0] < 0:
                # composite glyph
                pos_in_glyph = 10
                flags = GF_MORE_COMPONENTS
                while flags & GF_MORE_COMPONENTS:
                    flags = unpack(">H", data[pos_in_glyph:pos_in_glyph+2])[0]
                    glyphIdx = unpack(">H", data[pos_in_glyph+2:pos_in_glyph+4])[0]
                    # remap the component's glyph index into the subset
                    data = _set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
                    pos_in_glyph = pos_in_glyph + 4
                    if flags & GF_ARG_1_AND_2_ARE_WORDS:
                        pos_in_glyph = pos_in_glyph + 4
                    else:
                        pos_in_glyph = pos_in_glyph + 2
                    if flags & GF_WE_HAVE_A_SCALE:
                        pos_in_glyph = pos_in_glyph + 2
                    elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
                        pos_in_glyph = pos_in_glyph + 4
                    elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
                        pos_in_glyph = pos_in_glyph + 8
            glyf.append(data)
            pos = pos + glyphLen
            if pos % 4 != 0:
                # keep each glyph 4-byte aligned
                padding = 4 - pos % 4
                glyf.append('\0' * padding)
                pos = pos + padding
        offsets.append(pos)
        output.add('glyf', ''.join(glyf))

        # loca - Index to location
        loca = []
        if (pos + 1) >> 1 > 0xFFFF:
            indexToLocFormat = 1    # long format
            for offset in offsets:
                loca.append(offset)
            loca = apply(pack, [">%dL" % len(loca)] + loca)
        else:
            indexToLocFormat = 0    # short format (offsets stored halved)
            for offset in offsets:
                loca.append(offset >> 1)
            loca = apply(pack, [">%dH" % len(loca)] + loca)
        output.add('loca', loca)

        # head - Font header
        head = self.get_table('head')
        head = _set_ushort(head, 50, indexToLocFormat)
        output.add('head', head)

        return output.makeStream()
#
# TrueType font embedding
#

# PDF font-descriptor flag bits (PDF Reference Guide, table 5.19).
# Bit numbers in the spec are 1-based, hence bit N is 1 << (N - 1).
FF_FIXED       = 1 << (1 - 1)    # FixedPitch
FF_SERIF       = 1 << (2 - 1)    # Serif
FF_SYMBOLIC    = 1 << (3 - 1)    # Symbolic
FF_SCRIPT      = 1 << (4 - 1)    # Script
FF_NONSYMBOLIC = 1 << (6 - 1)    # Nonsymbolic
FF_ITALIC      = 1 << (7 - 1)    # Italic
FF_ALLCAP      = 1 << (17 - 1)   # AllCap
FF_SMALLCAP    = 1 << (18 - 1)   # SmallCap
FF_FORCEBOLD   = 1 << (19 - 1)   # ForceBold
class TTFontFace(TTFontFile, pdfmetrics.TypeFace):
    """A TrueType typeface.

    Works much like a single-byte typeface, except that its glyphs are
    keyed by UCS character codes rather than by glyph names."""

    def __init__(self, filename, validate=0, subfontIndex=0):
        "Loads a TrueType font from filename."
        pdfmetrics.TypeFace.__init__(self, None)
        TTFontFile.__init__(self, filename, validate=validate, subfontIndex=subfontIndex)

    def getCharWidth(self, code):
        "Returns the width of character U+<code>"
        width = self.charWidths.get(code, self.defaultWidth)
        return width

    def addSubsetObjects(self, doc, fontname, subset):
        """Generate a TrueType font subset and add it to the PDF document.

        Returns a PDFReference to the new FontDescriptor object."""
        # Embed the raw subsetted font program as a PDF stream object.
        stream = pdfdoc.PDFStream()
        stream.content = self.makeSubset(subset)
        stream.dictionary['Length1'] = len(stream.content)
        if doc.compression:
            stream.filters = [pdfdoc.PDFZCompress]
        streamRef = doc.Reference(stream, 'fontFile:%s(%s)' % (self.filename, fontname))
        # Subset fonts carry their own code assignments, so clear the
        # Nonsymbolic bit and force the Symbolic one.
        descFlags = (self.flags & ~FF_NONSYMBOLIC) | FF_SYMBOLIC
        descriptor = pdfdoc.PDFDictionary({
            'Type': '/FontDescriptor',
            'Ascent': self.ascent,
            'CapHeight': self.capHeight,
            'Descent': self.descent,
            'Flags': descFlags,
            'FontBBox': pdfdoc.PDFArray(self.bbox),
            'FontName': pdfdoc.PDFName(fontname),
            'ItalicAngle': self.italicAngle,
            'StemV': self.stemV,
            'FontFile2': streamRef,
            })
        return doc.Reference(descriptor, 'fontDescriptor:' + fontname)
class TTEncoding:
    """Encoding used by TrueType fonts (invariably UTF-8).

    A TTEncoding never becomes a PDF object of its own: every generated
    font subset needs its own 8-bit encoding, and TTFont itself takes
    care of producing those."""

    def __init__(self):
        self.name = "UTF-8"
class TTFont:
    """Represents a TrueType font.

    Its encoding is always UTF-8.

    Note: you cannot use the same TTFont object for different documents
    at the same time.

    Example of usage:

        font = ttfonts.TTFont('PostScriptFontName', '/path/to/font.ttf')
        pdfmetrics.registerFont(font)
        canvas.setFont('PostScriptFontName', size)
        canvas.drawString(x, y, "Some text encoded in UTF-8")
    """
    class State:
        '''Per-document subsetting state: tracks which unicode code points
        have been seen and how they map onto (subset, byte) codes.'''
        namePrefix = 'F'
        def __init__(self,asciiReadable=1):
            # assignments maps a unicode code point to a flat code
            # n = (subset << 8) | byte; see splitString below.
            self.assignments = {}
            self.nextCode = 0
            self.internalName = None
            self.frozen = 0
            if asciiReadable:
                # Let's add the first 128 unicodes to the 0th subset, so ' '
                # always has code 32 (for word spacing to work) and the ASCII
                # output is readable
                subset0 = range(128)
                self.subsets = [subset0]
                for n in subset0:
                    self.assignments[n] = n
                self.nextCode = 128
            else:
                # NOTE(review): [[32]*33] fills slots 0..32 of subset 0 with
                # code 32 (space) placeholders, so splitString's
                # `subsets[0][n] = code` writes for n <= 32 have a target --
                # confirm against the n <= 32 branch below.
                self.subsets = [[32]*33]
                self.assignments[32] = 32
    _multiByte = 1      # We want our own stringwidth
    _dynamicFont = 1    # We want dynamic subsetting
    def __init__(self, name, filename, validate=0, subfontIndex=0,asciiReadable=1):
        """Loads a TrueType font from filename.

        If validate is set to a false value, skips checksum validation.  This
        can save time, especially if the font is large.
        """
        self.fontName = name
        self.face = TTFontFace(filename, validate=validate, subfontIndex=subfontIndex)
        self.encoding = TTEncoding()
        from weakref import WeakKeyDictionary
        # Weak keys: when a document goes away its subsetting state does too.
        self.state = WeakKeyDictionary()
        self._asciiReadable = asciiReadable
    def _py_stringWidth(self, text, size, encoding='utf-8'):
        "Calculate text width"
        if not isinstance(text,unicode):
            text = unicode(text, encoding or 'utf-8')   # encoding defaults to utf-8
        g = self.face.charWidths.get
        dw = self.face.defaultWidth
        # Sum of per-character advance widths, scaled from 1000-unit em to size.
        return 0.001*size*sum([g(ord(u),dw) for u in text])
    stringWidth = _py_stringWidth
    def _assignState(self,doc,asciiReadable=None,namePrefix=None):
        '''convenience function for those wishing to roll their own state properties'''
        if asciiReadable is None:
            asciiReadable = self._asciiReadable
        try:
            state = self.state[doc]
        except KeyError:
            state = self.state[doc] = TTFont.State(asciiReadable)
        if namePrefix is not None:
            state.namePrefix = namePrefix
        return state
    def splitString(self, text, doc, encoding='utf-8'):
        """Splits text into a number of chunks, each of which belongs to a
        single subset.  Returns a list of tuples (subset, string).  Use subset
        numbers with getSubsetInternalName.  Doc is needed for distinguishing
        subsets when building different documents at the same time."""
        asciiReadable = self._asciiReadable
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(asciiReadable)
        curSet = -1
        cur = []
        results = []
        if not isinstance(text,unicode):
            text = unicode(text, encoding or 'utf-8')   # encoding defaults to utf-8
        #selvam patching
        # Pass 1 (Selvam's Tamil fix): Tamil left-side vowel signs
        # (U+0BC6 E=3014, U+0BC7 EE=3015, U+0BC8 AI=3016) are stored AFTER
        # their consonant in Unicode logical order, but must be DRAWN before
        # it; this pass swaps each sign in front of the preceding consonant.
        # Two-part signs O/OO/AU (3018/3019/3020) are additionally split into
        # a left part plus an inserted trailing mark.
        # NOTE(review): the bound checks use `i <= len(tmp)` / `i <= limit`,
        # which look off-by-one (an index equal to the length would be out of
        # range) -- confirm with text ending in one of these signs.
        #print "Encoding",text,len(text)
        tmp = list(text)
        limit = len(tmp)
        #sel_map = {'2965_3007': 65537,'2969_3007' : 65538}
        i = 0
        for t in list(text):
            #print ord(t),"--t"
            print tmp,"Before"
            # Simple left-side signs: swap sign and preceding consonant,
            # unless the previous char is itself such a sign.
            if ord(t)==3015 or ord(t)==3014 or ord(t)==3016:
                #print "T is obc7",i, limit
                if i <= len(tmp):
                    backup = tmp[i]
                    if ord(tmp[i-1]) not in (3014,3015,3016):
                        tmp[i]=tmp[i-1]
                        tmp[i-1]= backup
                print tmp,"3016 tmp now"
            # U+0BCA (O): replace with left E sign, swap before the
            # consonant, and insert the AA sign after it.
            if ord(t)==3018:
                tmp[i] = u'ெ' #chr(3015)
                if i <= len(tmp):
                    if ord(tmp[i-1]) not in (3018,3019,3020):
                        backup = tmp[i]
                        tmp[i]=tmp[i-1]
                        tmp[i-1]= backup
                        tmp.insert(i+1,u'ா')
                        i=i+1
                print tmp,"3018 tmp now"
                pass
            # U+0BCB (OO): same, with the left EE sign.
            if ord(t)==3019:
                if ord(tmp[i-1]) not in (3018,3019,3020):
                    tmp[i] = u'ே' #chr(3015)
                    if i <= limit:
                        backup = tmp[i]
                        tmp[i]=tmp[i-1]
                        tmp[i-1]= backup
                        tmp.insert(i+1,u'ா')
                        i=i+1
            # U+0BCC (AU): same, with the left E sign plus the AU length mark.
            if ord(t)==3020:
                if ord(tmp[i-1]) not in (3018,3019,3020):
                    tmp[i] = u'ெ' #chr(3015)
                    #print "T is obc7",i, limit
                    if i <= limit:
                        #print "swapping NNNNNNNNN"
                        backup = tmp[i]
                        tmp[i]=tmp[i-1]
                        tmp[i-1]= backup
                        tmp.insert(i+1,u'ள')
                        i=i+1
            i=i+1
        text= ''.join(tmp)
        #selvam ends
        assignments = state.assignments
        subsets = state.subsets
        #selvam starts
        # Pass 2 (Selvam): fuse consonant + right/below vowel-sign pairs
        # into private-use code points for which the patched font is assumed
        # to carry precomposed ligature glyphs.
        tmp = []
        myset = map(ord,text)
        # NOTE(review): this first loop only resets tmp and appears to be
        # leftover dead code -- confirm it can be removed.
        for sel,code in enumerate(myset):
            tmp = []
        #NEWNEW
        # Keys are "<consonant>_<sign>" in decimal (signs: 3007..3010 =
        # I/II/U/UU vowel signs, 3021 = virama); values are private-use
        # code points (>= 65537) of the ligature glyphs.
        sel_map = {'2965_3007':65537,'2966_3007':65538,'2970_3007':65539,'2972_3007':65540,'2974_3007':65541,'2975_3007':65542,'2979_3007':65543,'2980_3007':65544,'2984_3007':65545,'2985_3007':65546,
        '2986_3007':65547,'2990_3007':65548,'2991_3007':65549,'2992_3007':65550,'2993_3007':65551,'2994_3007':65552,'2995_3007':65553,'2996_3007':65554,'2997_3007':65555,'2999_3007':65556,
        '3000_3007':65557,'3001_3007':65558,'2965_3008':65559,'2969_3008':65560,'2970_3008':65561,'2972_3008':65562,'2974_3008':65563,'2975_3008':65564,'2979_3008':65565,'2980_3008':65566,
        '2984_3008':65567,'2985_3008':65568,'2986_3008':65569,'2990_3008':65570,'2991_3008':65571,'2992_3008':65572,'2993_3008':65573,'2994_3008':65574,'2995_3008':65575,'2996_3008':65576,
        '2997_3008':65577,'2999_3008':65578,'3000_3008':65579,'3001_3008':65580,'2965_3009':65581,'2969_3009':65582,'2970_3009':65583,'2974_3009':65585,'2975_3009':65586,'2979_3009':65587,
        '2980_3009':65588,'2984_3009':65589,'2985_3009':65590,'2986_3009':65591,'2990_3009':65592,'2991_3009':65593,'2992_3009':65594,'2993_3009':65595,'2994_3009':65596,'2995_3009':65597,
        '2996_3009':65598,'2997_3009':65599,'2965_3010':65600,'2969_3010':65601,'2970_3010':65602,'2974_3010':65604,'2975_3010':65605,'2979_3010':65606,'2980_3010':65607,'2984_3010':65608,
        '2985_3010':65609,'2986_3010':65610,'2990_3010':65611,'2991_3010':65612,'2992_3010':65613,'2993_3010':65614,'2994_3010':65615,'2995_3010':65616,'2996_3010':65617,'2997_3010':65618,
        '2965_3021':65623,'2969_3021':65624,'2970_3021':65625,'2972_3021':65626,'2974_3021':65627,'2975_3021':65628,'2979_3021':65629,'2980_3021':65630,'2984_3021':65631,'2985_3021':65632,
        '2986_3021':65633,'2990_3021':65634,'2991_3021':65635,'2992_3021':65636,'2993_3021':65637,'2994_3021':65638,'2995_3021':65639,'2996_3021':65640,'2997_3021':65641,'2999_3021':65642,
        '3000_3021':65643,'3001_3021':65644,'2998_3021':65646,'2998_3008':65647,'2998_3007':65648}
        for sel,code in enumerate(myset):
            # If the NEXT code point is a combinable sign, emit the fused
            # ligature code instead of this character.
            if len(myset)-1 > sel and ((myset[sel+1] >= 3007 and myset[sel+1] <= 3010) or myset[sel+1]==3021):
                key = str(code)+"_"+str(myset[sel+1])
                if key in sel_map.keys():
                    print "INside-----------"
                    tmp.append(sel_map[key])
                # NOTE(review): if the pair is absent from sel_map, the base
                # character is silently dropped here -- confirm intended.
            else:
                #print "Code",code
                # Signs already consumed by the previous iteration's
                # ligature are skipped; everything else passes through.
                if code not in (3007,3008,3009,3010,3021):
                    tmp.append(code)
                else:
                    print "Skipping "
                    pass
        text = ''.join(map(unichr,tmp))
        #selvam ends
        # Standard ReportLab dynamic subsetting: assign each code point a
        # flat code n = (subset << 8) | byte and chunk the text per subset.
        for code in map(ord,text):
            print code,"-CODE -----CODE"
            #state.subsets[0] = tmp
            #---------
            if assignments.has_key(code):
                n = assignments[code]
                #print "ttfonts.py 1051 if n=",n
            else:
                if state.frozen:
                    raise pdfdoc.PDFError, "Font %s is already frozen, cannot add new character U+%04X" % (self.fontName, code)
                n = state.nextCode
                #print "ttfonts.py 1051 else n=",n
                if n&0xFF==32:
                    # make code 32 always be a space character
                    if n!=32: subsets[n >> 8].append(32)
                    state.nextCode += 1
                    n = state.nextCode
                state.nextCode += 1
                assignments[code] = n
                if n>32:
                    # A byte value of 0 means we just rolled into a brand-new
                    # 256-character subset.
                    if not(n&0xFF): subsets.append([])
                    subsets[n >> 8].append(code)
                    print subsets,"--ooh"
                else:
                    subsets[0][n] = code
            if (n >> 8) != curSet:
                # Crossed a subset boundary: flush the chunk built so far.
                if cur:
                    results.append((curSet, ''.join(map(chr,cur))))
                curSet = (n >> 8)
                cur = []
            cur.append(n & 0xFF)
        if cur:
            results.append((curSet,''.join(map(chr,cur))))
        return results
    def getSubsetInternalName(self, subset, doc):
        """Returns the name of a PDF Font object corresponding to a given
        subset of this dynamic font.  Use this function instead of
        PDFDocument.getInternalFontName."""
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
        if subset < 0 or subset >= len(state.subsets):
            raise IndexError, 'Subset %d does not exist in font %s' % (subset, self.fontName)
        if state.internalName is None:
            # First use within this document: pick an internal name and
            # register the font so addObjects gets called at save time.
            state.internalName = state.namePrefix +`(len(doc.fontMapping) + 1)`
            doc.fontMapping[self.fontName] = '/' + state.internalName
            doc.delayedFonts.append(self)
        return '/%s+%d' % (state.internalName, subset)
    def addObjects(self, doc):
        """Makes one or more PDF objects to be added to the document.  The
        caller supplies the internal name to be used (typically F1, F2, ... in
        sequence).

        This method creates a number of Font and FontDescriptor objects.  Every
        FontDescriptor is a (no more than) 256 character subset of the original
        TrueType font."""
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
        print "ADD OBJECT ++++++++++++++++++++",state.subsets
        # Freeze: after objects are emitted no new characters may be added.
        state.frozen = 1
        for n,subset in enumerate(state.subsets):
            internalName = self.getSubsetInternalName(n, doc)[1:]
            baseFontName = "%s+%s%s" % (SUBSETN(n),self.face.name,self.face.subfontNameX)
            print "baseFontName",baseFontName
            print subset
            pdfFont = pdfdoc.PDFTrueTypeFont()
            pdfFont.__Comment__ = 'Font %s subset %d' % (self.fontName, n)
            pdfFont.Name = internalName
            pdfFont.BaseFont = baseFontName
            pdfFont.FirstChar = 0
            pdfFont.LastChar = len(subset) - 1
            widths = map(self.face.getCharWidth, subset)
            pdfFont.Widths = pdfdoc.PDFArray(widths)
            # ToUnicode CMap makes text extraction/search work on the subset.
            cmapStream = pdfdoc.PDFStream()
            cmapStream.content = makeToUnicodeCMap(baseFontName, subset)
            #print cmapStream.content,"CCCCCccc"
            if doc.compression:
                cmapStream.filters = [pdfdoc.PDFZCompress]
            pdfFont.ToUnicode = doc.Reference(cmapStream, 'toUnicodeCMap:' + baseFontName)
            pdfFont.FontDescriptor = self.face.addSubsetObjects(doc, baseFontName, subset)
            # link it in
            ref = doc.Reference(pdfFont, internalName)
            fontDict = doc.idToObject['BasicFonts'].dict
            fontDict[internalName] = pdfFont
        del self.state[doc]
# Swap in the accelerated C implementation of stringWidth when available.
try:
    from _rl_accel import _instanceStringWidthTTF
    import new
except ImportError:
    # No accelerator: TTFont keeps the pure-Python _py_stringWidth.
    pass
else:
    TTFont.stringWidth = new.instancemethod(_instanceStringWidthTTF, None, TTFont)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment