# tribes_extract_grid.rb
#
# Extracts Tribes terrain data from ted volumes.
# Metadata is output in JSON format.
#
# Copyright (C) 2024 James S Urquhart.
# Also makes use of LZH huffman decode tables and general decode logic.
#
require 'optparse'
require 'json'
require 'stringio'

$count = 0  # debug counter: Huffman tree updates
$pcount = 0 # debug counter: match positions decoded
$options = {}
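
# LZH decoder: the decode half of an LZSS + adaptive Huffman ("LZHUF"-style)
# scheme. Literals and match lengths share one adaptive Huffman tree (codes
# 0..255 are literals, higher codes encode match lengths); match distances
# are decoded separately with the aid of the D_CODE table. The window is a
# 4KB ring buffer.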
class LZH
  BUF_SIZE = 4096
  LOOK_AHEAD = 60
  THRESHOLD = 2
  NUL = BUF_SIZE
  N_CHAR = (256 - THRESHOLD + LOOK_AHEAD)
  TABLE_SIZE = (N_CHAR * 2 - 1)
  ROOT = (TABLE_SIZE - 1)
  MAX_FREQ = 0x8000

  D_CODE = [
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
    0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A,
    0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B,
    0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D,
    0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F,
    0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11,
    0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13,
    0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15,
    0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17,
    0x18, 0x18, 0x19, 0x19, 0x1A, 0x1A, 0x1B, 0x1B,
    0x1C, 0x1C, 0x1D, 0x1D, 0x1E, 0x1E, 0x1F, 0x1F,
    0x20, 0x20, 0x21, 0x21, 0x22, 0x22, 0x23, 0x23,
    0x24, 0x24, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
    0x28, 0x28, 0x29, 0x29, 0x2A, 0x2A, 0x2B, 0x2B,
    0x2C, 0x2C, 0x2D, 0x2D, 0x2E, 0x2E, 0x2F, 0x2F,
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
    0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
  ]
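
  # D_CODE maps the first byte of an encoded match position to the upper
  # 6 bits of the distance; decode_dlen gives how many bits of that byte
  # were actually significant, so decode_position reads the remaining low
  # bits directly from the bit stream.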
  attr_accessor :getbuf, :getlen, :putbuf, :putlen, :textsize, :codesize,
                :printcount, :match_position, :match_length
  attr_accessor :lson, :dad, :rson, :text_buf, :freq, :prnt, :son

  # Top up the fetch window so at least 8 valid bits are available
  def refill_byte_buf(ios)
    while @getlen <= 8
      byte = ios.read(1)
      byte = byte.nil? ? 0 : byte.unpack('C')[0]
      @getbuf |= byte << (8 - @getlen)
      @getbuf &= 0xFFFF
      @getlen += 8
    end
  end

  def get_bit(ios)
    refill_byte_buf(ios)
    bit = @getbuf
    @getbuf = (@getbuf << 1) & 0xFFFF
    @getlen -= 1
    (bit >> 15) & 0x1
  end

  def get_byte(ios)
    refill_byte_buf(ios)
    byte = @getbuf
    @getbuf = (@getbuf << 8) & 0xFFFF
    @getlen -= 8
    byte >> 8
  end
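
  # Note on the fetch window above: @getbuf is a 16-bit buffer whose top
  # bits are the next unconsumed input bits (@getlen of them). get_bit
  # returns the MSB and shifts left by one; get_byte takes the top 8 bits.
  # Reads past end-of-stream are treated as zero bytes, which lets the
  # decoder run off the end of a truncated stream harmlessly.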
  def start_huff
    @freq = Array.new(TABLE_SIZE + 1, 0)
    @prnt = Array.new(TABLE_SIZE + N_CHAR, 0)
    @son = Array.new(TABLE_SIZE, 0)
    (0...N_CHAR).each do |i|
      @freq[i] = 1
      @son[i] = (i + TABLE_SIZE) & 0xFFFF
      @prnt[i + TABLE_SIZE] = i
    end
    i = 0
    j = N_CHAR
    while j <= ROOT
      @freq[j] = @freq[i] + @freq[i + 1]
      @son[j] = i & 0xFFFF
      @prnt[i] = @prnt[i + 1] = j
      i += 2
      j += 1
    end
    @freq[TABLE_SIZE] = 0xffff
    @prnt[ROOT] = 0
  end
  def decode_char(ios)
    c = @son[ROOT]
    # Walk the tree from the root, consuming one input bit per level,
    # until a leaf (index >= TABLE_SIZE) is reached
    while c < TABLE_SIZE
      c += get_bit(ios)
      c = @son[c]
    end
    c -= TABLE_SIZE
    update(c)
    c
  end

  # Number of significant prefix bits in the first byte of a match position
  def decode_dlen(i)
    if i < 32
      3
    elsif i < 80
      4
    elsif i < 144
      5
    elsif i < 192
      6
    elsif i < 240
      7
    else
      8
    end
  end

  def decode_position(ios)
    i = get_byte(ios)
    j = decode_dlen(i)
    c = D_CODE[i] << 6
    # Two of the prefix bits were part of the byte just read; pull in the
    # remaining (j - 2) bits to complete the low 6 bits of the position
    j -= 2
    j.times { i = (i << 1) + get_bit(ios) }
    $pcount += 1
    c | (i & 0x3f)
  end
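
  # Example: a first byte of 0x50 (in the 80..143 band) has decode_dlen == 5,
  # so D_CODE[0x50] (= 0x04) supplies the upper distance bits and 5 - 2 = 3
  # more bits are read to complete the low 6 bits of the match position.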
  def lzh_unpack(text_size, in_stream, out_stream)
    init_huff_and_tree
    r = BUF_SIZE - LOOK_AHEAD
    @text_buf = Array.new(BUF_SIZE + LOOK_AHEAD - 1, 0)
    count = 0
    while count < text_size
      c = decode_char(in_stream)
      if c < 256
        # Literal byte: emit it and record it in the ring buffer
        out_stream.write([c].pack('C'))
        @text_buf[r] = c
        r = (r + 1) & (BUF_SIZE - 1)
        count += 1
      else
        # Match: copy (c - 255 + THRESHOLD) bytes from an earlier position
        # in the ring buffer
        i = (r - decode_position(in_stream) - 1) & (BUF_SIZE - 1)
        j = c - 255 + THRESHOLD
        j.times do |k|
          c = @text_buf[(i + k) & (BUF_SIZE - 1)]
          out_stream.write([c].pack('C'))
          @text_buf[r] = c
          r = (r + 1) & (BUF_SIZE - 1)
          count += 1
        end
      end
    end
  end
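
  # Note: @text_buf is the 4KB sliding window required by the format.
  # Matches may overlap their own output, which the byte-at-a-time copy in
  # lzh_unpack handles correctly.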
  private

  def init_huff_and_tree
    @getbuf = 0
    @getlen = 0
    @putlen = 0
    @putbuf = 0
    @codesize = 0
    @match_length = 0
    @textsize = 0
    start_huff
  end

  def update(c)
    i = j = k = l = 0
    reconst if @freq[ROOT] == MAX_FREQ
    c = @prnt[c + TABLE_SIZE] # start from the leaf's parent node
    begin
      @freq[c] += 1
      k = @freq[c]
      # Swap nodes if needed to keep the tree frequency-ordered
      l = c + 1 # successor of 'c'
      if k > @freq[l]
        l += 1 while k > @freq[l]
        l -= 1 # step back to the last node that 'k' exceeds
        # Swap the frequencies
        @freq[c], @freq[l] = @freq[l], @freq[c]
        # Re-link parent and child pointers for both nodes
        i = @son[c]
        @prnt[i] = l
        @prnt[i + 1] = l if i < TABLE_SIZE # internal node: fix both children
        j = @son[l]
        @son[l] = i & 0xFFFF
        @prnt[j] = c
        @prnt[j + 1] = c if j < TABLE_SIZE
        @son[c] = j & 0xFFFF
        c = l # continue from the swapped position
      end
      c = @prnt[c]
    end while c != 0 # climb toward the root
    $count += 1
  end
  def reconst
    # Rebuild the tree: collect the leaf nodes, halving their frequencies
    j = 0
    (0...TABLE_SIZE).each do |i|
      if @son[i] >= TABLE_SIZE
        @freq[j] = (@freq[i] + 1) >> 1
        @son[j] = @son[i]
        j += 1
      end
    end
    # Rebuild internal nodes, keeping the frequency ordering
    i = 0
    (N_CHAR...TABLE_SIZE).each do |j|
      k = i + 1
      f = @freq[j] = @freq[i] + @freq[k]
      k = j - 1
      k -= 1 while k >= 0 && f < @freq[k]
      k += 1
      l = (j - k) * 2
      # Shift the arrays up by one slot to make room at position k
      @freq[k + 1, l] = @freq[k, l].dup
      @freq[k] = f
      @son[k + 1, l] = @son[k, l].dup
      @son[k] = i
      i += 2
    end
    # Reconnect parent links
    (0...TABLE_SIZE).each do |i|
      k = @son[i]
      @prnt[k] = i
      @prnt[k + 1] = i if k < TABLE_SIZE
    end
  end
end
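
# Hypothetical standalone usage of the decoder, assuming `packed_io` is an
# IO positioned at LZH-packed bytes and `unpacked_size` is the expected
# output length:
#   lzh = LZH.new
#   out = StringIO.new
#   lzh.lzh_unpack(unpacked_size, packed_io, out)
#   raw = out.string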
class IFFBlock
  ALIGN_DWORD = 0x80000000

  attr_accessor :ident, :size

  def initialize
    @ident = 0
    @size = 0
  end

  # Stored size, rounded up to 4-byte alignment if the high bit is set,
  # otherwise to 2-byte alignment
  def get_size
    if @size & ALIGN_DWORD != 0
      ((@size & ~ALIGN_DWORD) + 3) & ~3
    else
      (@size + 1) & ~1
    end
  end

  def get_raw_size
    @size
  end

  def seek_to_end(start_pos, file)
    file.seek(start_pos + get_size + 8, IO::SEEK_SET)
  end
end
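
# For example, a block with size 0x80000005 (high bit set) occupies
# ((5 + 3) & ~3) = 8 payload bytes on disk, while a plain size of 5 pads
# to 6 bytes.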
class BlockIndex
  IDENT_GFIL = 1279870535 # "GFIL" (little-endian)
  PATTERN_ONETOALL = 0
  PATTERN_UNIQUE = 1
  PATTERN_MOSAIC = 2

  attr_accessor :ml_name,
                :last_block_id,
                :detail_count,
                :scale,
                :bounds,
                :origin,
                :range,
                :size,
                :pattern,
                :block_map,
                :block_list

  def initialize
    @ml_name = nil
    @last_block_id = 0
    @detail_count = 0
    @scale = 0
    @bounds = [[0, 0, 0], [0, 0, 0]]
    @origin = [0, 0]
    @range = [0, 0]
    @size = [0, 0]
    @pattern = 0
    @block_map = []
    @block_list = []
  end

  # Resolve a grid position to [local_x, local_y, block]
  def get_block_info(pos)
    scale_shift = (@detail_count - 1) + @scale
    block_x = pos[0].to_i >> scale_shift
    block_y = pos[1].to_i >> scale_shift
    block = get_block_map_index(block_x, block_y)
    return nil if block.nil?
    [pos[0] - (block_x << scale_shift),
     pos[1] - (block_y << scale_shift),
     block]
  end

  def get_block_map_index(x, y)
    @block_map[(y * @size[0]) + x] rescue nil
  end
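
  # Note: @block_map is a row-major grid of @size[0] * @size[1] signed
  # entries that reference blocks in @block_list; lookups outside the grid
  # yield nil.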
  def each_block(&proc)
    scale_shift = (@detail_count - 1) + @scale
    (0...@size[1]).each do |y|
      (0...@size[0]).each do |x|
        bm = get_block_map_index(x, y)
        next if bm.nil?
        proc.call(x, y, x << scale_shift, y << scale_shift, bm)
      end
    end
  end

  def read_path(file_path)
    File.open(file_path, 'rb') do |fp|
      return read(fp)
    end
  end
  def read(fp)
    block = IFFBlock.new
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_GFIL
    version = fp.read(4).unpack('L<')[0]
    return false if version > 1
    mlname_size = fp.read(4).unpack('L<')[0]
    @ml_name = fp.read(mlname_size).unpack('Z*')[0]
    @last_block_id, @detail_count, @scale = fp.read(12).unpack('L<L<L<')
    @bounds[0] = fp.read(12).unpack('eee')
    @bounds[1] = fp.read(12).unpack('eee')
    @origin = fp.read(8).unpack('l<l<')
    @range = fp.read(8).unpack('ee')
    @size = fp.read(8).unpack('l<l<')
    if version > 0
      @pattern = fp.read(4).unpack('l<')[0]
    end
    total_blocks = @size[0] * @size[1]
    @block_map = fp.read(total_blocks * 4).unpack('l<*')
    list_size = fp.read(4).unpack('L<')[0]
    @block_list = []
    (0...list_size).each do |i|
      ident, name_size = fp.read(8).unpack('l<L<')
      @block_list << [ident, fp.read(name_size).unpack('Z*')[0]]
    end
    true
  end
end
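
# Hypothetical usage, assuming `dtf_bytes` holds the contents of a .dtf
# index file (see process() below for how it is pulled from a volume):
#   bindex = BlockIndex.new
#   ok = bindex.read(StringIO.new(dtf_bytes))
#   bindex.each_block { |bx, by, wx, wy, id| puts "block #{id} at #{wx},#{wy}" }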
class BlockData
  IDENT_GBLK = 1263288903 # "GBLK" (little-endian)

  attr_accessor :block_ident,
                :last_block_id,
                :detail_count,
                :range,
                :size,
                :height_map,
                :mat_map,
                :pin_map,
                :light_scale,
                :light_map,
                :hires_light_map

  def initialize
    @block_ident = nil
    @last_block_id = nil
    @detail_count = nil
    @range = nil
    @size = nil
    @height_map = nil
    @mat_map = nil
    @pin_map = nil
    @light_scale = nil
    @light_map = nil
    @hires_light_map = nil
  end

  # Reads a u32 unpacked size followed by LZH-packed data, returning the
  # decompressed bytes as a StringIO
  def read_compressed_data(io)
    size = io.read(4).unpack('L<')[0]
    decompressor = LZH.new
    out_stream = StringIO.new
    decompressor.lzh_unpack(size, io, out_stream)
    out_stream.seek(0)
    out_stream
  end

  def read_path(file_path)
    File.open(file_path, 'rb') do |fp|
      return read(fp)
    end
  end
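
  # Block format versions, as handled below: v0 stores raw float heights;
  # v1-3 use per-row delta compression; v4+ LZH-pack the height map, and
  # v5+ also pack the material and light maps.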
  def read(fp)
    block = IFFBlock.new
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_GBLK
    version = fp.read(4).unpack('L<')[0]
    @block_ident = fp.read(16).unpack('Z*')[0]
    @detail_count, @light_scale = fp.read(8).unpack('l<l<')
    @range = fp.read(8).unpack('ee')
    @size = fp.read(8).unpack('l<l<')
    # Height map: (size+1) x (size+1) grid of float heights
    if version == 0
      @height_map = fp.read(4 * (@size[0] + 1) * (@size[1] + 1)).unpack('e*')
    elsif version < 4
      # Row compression: full float rows at the top and bottom edges; each
      # interior row is a leading float plus signed byte deltas multiplied
      # by a per-row scale, with an explicit float at the right edge
      row_size = @size[0] + 1
      # leading row
      leading_row = fp.read(4 * row_size).unpack('e*')
      rows = [leading_row]
      (1...@size[1]).each do |y|
        scale, lheight = fp.read(8).unpack('ee')
        offsets = fp.read(@size[0] - 1).unpack('c*')
        row = [lheight]
        row += offsets.map do |ch|
          lheight += (ch.to_f * scale)
          lheight
        end
        row << fp.read(4).unpack('e')[0]
        rows << row
      end
      # trailing row
      rows << fp.read(4 * row_size).unpack('e*')
      @height_map = rows.flatten
    else
      stream = read_compressed_data(fp)
      @height_map = stream.read(4 * (@size[0] + 1) * (@size[1] + 1)).unpack('e*')
    end
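    # For example, with scale = 0.5 and a leading height of 10.0, delta
    # bytes [4, -2] expand to heights [10.0, 12.0, 11.0] before the final
    # explicit right-edge float is appended.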
    # Material map: one two-byte entry per tile
    if version > 4
      stream = read_compressed_data(fp)
    else
      stream = fp
    end
    @mat_map = stream.read(@size[0] * @size[1] * 2).unpack('C*').each_slice(2).to_a
    # Pin map: 11 variable-length sub-maps
    if version >= 2
      @pin_map = []
      11.times do
        map_size = fp.read(2).unpack('S<')[0]
        pmap = fp.read(map_size).unpack('C*')
        @pin_map << pmap
      end
    end
    # Light map
    if @light_scale >= 0
      if version > 4
        stream = read_compressed_data(fp)
      else
        stream = fp
      end
      lmap_width = (@size[0] << @light_scale) + 1
      @light_map = stream.read(lmap_width * lmap_width * 2).unpack('S<*')
    end
    # Hi-res light map (not handled beyond reading the raw bytes)
    if version > 4
      lm_size = fp.read(4).unpack('L<')[0]
      if lm_size > 0
        @hires_light_map = fp.read(lm_size).unpack('C*')
        raise Exception.new("HiRes lightmap data present")
      end
    end
    if version >= 3
      # Note: this reads a sub-version field, deliberately shadowing the
      # block version above; the check below uses the new value
      version = fp.read(4).unpack('L<')[0]
      if version > 0
        num_hrlms = fp.read(4).unpack('L<')[0]
      else
        num_hrlms = 0
      end
    end
    if version == 3
      cpool_size, idx_size, tree_size = fp.read(12).unpack('L<L<L<')
      raise Exception.new("Can't read HiRes lightmap data") if cpool_size > 0
    end
    true
  end
end
class BlockFile
  # Same container identifiers as Volume below; these were missing here,
  # which would raise NameError if read() were ever called
  IDENT_PVOL = 1280267856 # "PVOL" (little-endian)
  IDENT_VOLS = 1936486262 # "vols" (little-endian)
  IDENT_VOLI = 1768714102 # "voli" (little-endian)

  COMPRESS_NONE = 0
  COMPRESS_RLE = 1
  COMPRESS_LZSS = 2
  COMPRESS_LZH = 3

  attr_accessor :files

  class Entry
    attr_accessor :id, :filename_ptr, :offset, :size, :compress_type

    def initialize(id, filename_ptr, offset, size, compress_type)
      @id = id
      @filename_ptr = filename_ptr
      @offset = offset
      @size = size
      @compress_type = compress_type
    end

    # Filenames live in a shared NUL-separated string table
    def get_filename(str_table)
      if @filename_ptr >= 0
        str_table[@filename_ptr..-1].split("\0").first
      else
        ""
      end
    end
  end

  def initialize
    @files = []
    @string_data = nil
    @file = nil
  end

  def read_path(file_path)
    File.open(file_path, 'rb') do |fp|
      return read(fp)
    end
  end

  def read(fp)
    block = IFFBlock.new
    # Read the IFF block and check for PVOL identifier
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_PVOL
    # Skip to the next block
    fp.seek(block.get_raw_size, IO::SEEK_SET)
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_VOLS
    # Read string data
    real_size = block.get_size
    @string_data = fp.read(real_size)
    # Read the VOLI block
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_VOLI
    num_items = block.get_size / 17
    @files = num_items.times.map do
      id, filename_ptr, offset, size, compress_type = fp.read(17).unpack('L<l<L<L<C')
      Entry.new(id, filename_ptr, offset, size, compress_type)
    end
    true
  end

  def dump_filenames
    @files.map do |entry|
      entry.get_filename(@string_data)
    end
  end

  def open_stream(file_path, filename)
    File.open(file_path, 'rb') do |fp|
      @files.each do |entry|
        if filename.casecmp(entry.get_filename(@string_data).strip).zero?
          fp.seek(entry.offset + 8, IO::SEEK_SET)
          data = fp.read(entry.size)
          return data
        end
      end
    end
    nil
  end
end
class Volume
  IDENT_PVOL = 1280267856 # "PVOL" (little-endian)
  IDENT_VOLS = 1936486262 # "vols" (little-endian)
  IDENT_VOLI = 1768714102 # "voli" (little-endian)

  COMPRESS_NONE = 0
  COMPRESS_RLE = 1
  COMPRESS_LZSS = 2
  COMPRESS_LZH = 3

  attr_accessor :files

  class Entry
    attr_accessor :id, :filename_ptr, :offset, :size, :compress_type

    def initialize(id, filename_ptr, offset, size, compress_type)
      @id = id
      @filename_ptr = filename_ptr
      @offset = offset
      @size = size
      @compress_type = compress_type
    end

    # Filenames live in a shared NUL-separated string table
    def get_filename(str_table)
      if @filename_ptr >= 0
        str_table[@filename_ptr..-1].split("\0").first
      else
        ""
      end
    end
  end

  def initialize
    @files = []
    @string_data = nil
    @file = nil
  end

  def read_path(file_path)
    File.open(file_path, 'rb') do |fp|
      return read(fp)
    end
  end

  def read(fp)
    block = IFFBlock.new
    # Read the IFF block and check for PVOL identifier
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_PVOL
    # Skip to the next block; the PVOL size field is treated here as an
    # absolute offset
    fp.seek(block.get_raw_size, IO::SEEK_SET)
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_VOLS
    # Read string table data
    real_size = block.get_size
    @string_data = fp.read(real_size)
    # Read the VOLI block: one 17-byte entry per contained file
    block.ident, block.size = fp.read(8).unpack('L<L<')
    return false if block.ident != IDENT_VOLI
    num_items = block.get_size / 17
    @files = num_items.times.map do
      id, filename_ptr, offset, size, compress_type = fp.read(17).unpack('L<l<L<L<C')
      Entry.new(id, filename_ptr, offset, size, compress_type)
    end
    true
  end

  def dump_filenames
    @files.map do |entry|
      entry.get_filename(@string_data)
    end
  end

  # Return the raw bytes of the named file within the volume, or nil
  def open_stream(file_path, filename)
    File.open(file_path, 'rb') do |fp|
      @files.each do |entry|
        if filename.casecmp(entry.get_filename(@string_data).strip).zero?
          fp.seek(entry.offset + 8, IO::SEEK_SET) # skip past VBLK header
          data = fp.read(entry.size)
          return data
        end
      end
    end
    nil
  end
end
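
# Hypothetical usage, listing the contents of a volume at `path`:
#   vol = Volume.new
#   vol.read_path(path)
#   puts vol.dump_filenames
#   bytes = vol.open_stream(path, 'terrain.dtf') # filename is illustrative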
OptionParser.new do |opts|
  opts.banner = "Usage: tribes_extract_grid.rb <file> [options]"

  opts.on("-oFILE", "--output=FILE", "Output JSON file (defaults to stdout)") do |file|
    $options[:output] = file
  end

  opts.on("-h", "--help", "Prints this help") do
    puts opts
    exit
  end
end.parse!

if ARGV.empty?
  puts "Error: You must specify a file!"
  puts "Usage: tribes_extract_grid.rb <file> [options]"
  exit 1
end
def process(file)
  ted_volume = Volume.new
  raise Exception.new("Invalid volume") if !ted_volume.read_path(file)

  # Find the terrain index (.dtf) and block (.dtb) files in the volume
  dtb_match = /^.*\.dtb$/
  dtf_match = /^.*\.dtf$/
  dtb_files = []
  dtf_files = []
  ted_volume.dump_filenames.each do |f|
    if f.match(dtf_match)
      dtf_files << f.strip
    elsif f.match(dtb_match)
      dtb_files << f.strip
    end
  end

  if dtf_files.empty?
    raise Exception.new("Could not find any index files")
  elsif dtf_files.count > 1
    raise Exception.new("Too many index files")
  elsif dtb_files.empty?
    raise Exception.new("No block files present")
  end

  dtf_data = ted_volume.open_stream(file, dtf_files[0])
  raise Exception.new("Can't open index file") if dtf_data.nil?
  dtf_stream = StringIO.new(dtf_data)
  bindex = BlockIndex.new
  raise Exception.new("Can't read index file") if !bindex.read(dtf_stream)

  out_blocks = {
    :ml_name => bindex.ml_name,
    :last_block_id => bindex.last_block_id,
    :detail_count => bindex.detail_count,
    :scale => bindex.scale,
    :bounds => bindex.bounds,
    :origin => bindex.origin,
    :range => bindex.range,
    :size => bindex.size,
    :pattern => bindex.pattern,
    :block_map => bindex.block_map,
    :block_list => bindex.block_list,
    :block_data => {}
  }

  # Load each block referenced by the index; block files are named
  # "<index name>#<ident>.dtb"
  basen = File.basename(dtf_files[0], ".dtf")
  bindex.block_list.each do |block|
    fname = "#{basen}\##{block[0]}.dtb"
    dtb_data = ted_volume.open_stream(file, fname)
    raise Exception.new("Can't load block file #{fname}") if dtb_data.nil?
    dtb_stream = StringIO.new(dtb_data)
    bdata = BlockData.new
    raise Exception.new("Problem reading block file #{fname}") if !bdata.read(dtb_stream)
    out_block = {
      :block_ident => bdata.block_ident,
      :detail_count => bdata.detail_count,
      :range => bdata.range,
      :size => bdata.size,
      :height_map => bdata.height_map,
      :mat_map => bdata.mat_map,
      :pin_map => bdata.pin_map,
      :light_scale => bdata.light_scale,
      :light_map => bdata.light_map,
    }
    out_blocks[:block_data][block[0]] = out_block
  end

  # Write JSON to the requested file, or to stdout if no output was given
  if !$options[:output].nil?
    File.open($options[:output], 'w') do |f|
      f.write(JSON.pretty_generate(out_blocks))
    end
  else
    $stdout.write(JSON.pretty_generate(out_blocks))
  end
end

process(ARGV[0])
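
# Example invocation (the .ted extension is illustrative):
#   ruby tribes_extract_grid.rb terrain.ted -o terrain.json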