require "matrix" | |
require "set" | |
# Flood fill from min (up until max) stopping when we hit a cube. | |
def count_outward_faces(cubes, min, max) | |
# To avoid stack overflow, we aren't recursive, instead we have a list of squares to check. | |
to_check = [min] | |
to_check_set = Set[min] | |
# Keep track of squares we have tested (as you can get to the same square multiple ways). |
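The preview cuts off before the fill loop itself. As a rough Python sketch of the same idea (iterative flood fill over the bounding box, counting every face where an outside cell touches a cube); the tuple-based cube coordinates, the scalar lo/hi bounds and the neighbour helper are my assumptions, not the gist's actual code:

from collections import deque

def count_outward_faces_sketch(cubes, lo, hi):
    # `cubes` is assumed to be a set of (x, y, z) tuples, and (lo, lo, lo) is
    # assumed to lie outside every cube (i.e. the bounds are padded by one).
    def neighbours(p):
        x, y, z = p
        yield from ((x + 1, y, z), (x - 1, y, z), (x, y + 1, z),
                    (x, y - 1, z), (x, y, z + 1), (x, y, z - 1))

    start = (lo, lo, lo)
    seen = {start}
    queue = deque([start])
    faces = 0
    while queue:
        cell = queue.popleft()
        for n in neighbours(cell):
            if any(c < lo or c > hi for c in n):
                continue               # stay inside the bounding box
            if n in cubes:
                faces += 1             # a cube face reachable from outside
            elif n not in seen:
                seen.add(n)
                queue.append(n)
    return faces
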
import torch
from torch import nn
from tqdm import tqdm
from torch import Tensor
from typing import Optional, List
import time

def subsequent_mask(size):
    return torch.triu(torch.full((size, size), float('-inf')), diagonal=1)
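A quick usage note (mine, not part of the gist): the returned mask is additive, so the -inf entries above the diagonal remove attention to future positions once softmax is applied. The same float mask can also be passed as attn_mask to nn.MultiheadAttention.

mask = subsequent_mask(4)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
scores = torch.randn(4, 4) + mask          # raw attention scores plus the mask
weights = torch.softmax(scores, dim=-1)    # each row now only attends to positions <= its own
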
import struct
from pathlib import Path
import wave

def parse_data(data):
    # the first 24 bytes are the header
    header = data[:24]
    # the remaining bytes are the data
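The function is cut off after the header split. A hedged sketch of how it might continue and be written out with the wave module; everything beyond the 24-byte header (little-endian signed 16-bit samples, mono, 44.1 kHz) is an assumption for illustration:

def parse_data_sketch(data, out_path=Path("out.wav")):
    header = data[:24]                      # first 24 bytes: header (as in the gist)
    body = data[24:]                        # remaining bytes: raw samples (assumed s16le)
    n = len(body) // 2
    samples = struct.unpack(f"<{n}h", body[:2 * n])
    with wave.open(str(out_path), "wb") as wav:
        wav.setnchannels(1)                 # assumed mono
        wav.setsampwidth(2)                 # 16-bit samples
        wav.setframerate(44100)             # assumed sample rate
        wav.writeframes(body)
    return header, samples
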
import torch
from torch import nn
from tqdm import tqdm

def subsequent_mask(size):
    return torch.triu(torch.full((size, size), float('-inf')), diagonal=1)

if __name__ == "__main__":
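    # (Not in the gist: the __main__ block is cut off here.) For comparison,
    # recent PyTorch versions ship an equivalent helper,
    # nn.Transformer.generate_square_subsequent_mask, which also returns an
    # additive float mask with -inf above the diagonal:
    builtin = nn.Transformer.generate_square_subsequent_mask(8)
    print(torch.equal(subsequent_mask(8), builtin))   # expected: True on recent versions
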
import torch
import math
from torch import Tensor
from typing import Optional

def get_relative_positional_encoding(length1: int, length2: int, d_model: int, device: torch.device):
    xs = torch.arange(length1, device=device).unsqueeze(1)
    ys = torch.arange(length2, device=device).unsqueeze(0)
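The preview stops after the two index grids. A hedged guess at how such a function might continue, reusing the torch and math imports above: take the grid of relative offsets xs - ys and run it through a standard sinusoidal encoding. The 10000 constant, the sin/cos interleaving and the assumption that d_model is even are mine, not necessarily the gist's.

def relative_sinusoidal_encoding_sketch(length1: int, length2: int, d_model: int, device: torch.device):
    xs = torch.arange(length1, device=device).unsqueeze(1)
    ys = torch.arange(length2, device=device).unsqueeze(0)
    rel = (xs - ys).float()                                   # (length1, length2) relative offsets
    div = torch.exp(torch.arange(0, d_model, 2, device=device).float()
                    * (-math.log(10000.0) / d_model))         # (d_model / 2,)
    angles = rel.unsqueeze(-1) * div                          # (length1, length2, d_model / 2)
    enc = torch.zeros(length1, length2, d_model, device=device)
    enc[..., 0::2] = torch.sin(angles)
    enc[..., 1::2] = torch.cos(angles)
    return enc
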
import torch
from math import log
import matplotlib.pyplot as plt

def get_positional_encoding(cycle_limit):
    max_len = 5000
    d_model = 256
    position = torch.arange(max_len).unsqueeze(1)
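The preview stops right after position. A hedged continuation, reading cycle_limit as the longest sine/cosine period (standing in for the usual 10000 constant of the standard sinusoidal encoding); that reading of cycle_limit is an assumption. Uses the torch, log and matplotlib imports above.

def get_positional_encoding_sketch(cycle_limit, max_len=5000, d_model=256):
    position = torch.arange(max_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, d_model, 2) * (-log(cycle_limit) / d_model))
    pe = torch.zeros(max_len, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    return pe

plt.imshow(get_positional_encoding_sketch(10000).T, aspect="auto", origin="lower")
plt.xlabel("position")
plt.ylabel("encoding dimension")
plt.show()
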
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from tqdm import tqdm

if __name__ == "__main__":
    output_dir = Path('frames')
    output_dir.mkdir(exist_ok=True)
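    # (Not the gist's actual plot; what follows mkdir is cut off.) A minimal
    # sketch of the usual pattern: write numbered frames into the directory,
    # then stitch them into a video. The sine animation is purely illustrative.
    x = np.linspace(0, 2 * np.pi, 500)
    for i in tqdm(range(100)):
        fig, ax = plt.subplots()
        ax.plot(x, np.sin(x + i * 0.1))
        ax.set_ylim(-1.1, 1.1)
        fig.savefig(output_dir / f"frame_{i:04d}.png")
        plt.close(fig)   # free each figure so memory stays flat over many frames
    # The frames can then be assembled with e.g. ffmpeg:
    #   ffmpeg -framerate 30 -i frames/frame_%04d.png out.mp4
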
import torch.nn as nn
import torch

# NOTE: I've just put this here so that I don't have to import any other part of your code base
# to try out / run this model
control_signals_labels = ['rhand', 'lhand', 'head']
residual_block_linear = 1024
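The constants are defined but the model itself is cut off in the preview. Below is a hedged sketch of a residual MLP block sized by residual_block_linear; the Linear -> ReLU -> Linear layout with a skip connection is my assumption, not the gist's actual architecture.

class ResidualBlock(nn.Module):
    def __init__(self, width=residual_block_linear):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(width, width),
            nn.ReLU(),
            nn.Linear(width, width),
        )

    def forward(self, x):
        return x + self.net(x)   # skip connection around the two linear layers

block = ResidualBlock()
out = block(torch.randn(4, residual_block_linear))   # shape preserved: (4, 1024)
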
import torch
from torch import nn

class MyModule(nn.Module):
    def __init__(self, num_input_features):
        super().__init__()
        num_hidden = 5
        num_layers = 2
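The __init__ body is truncated right after the hyperparameters. One plausible continuation, purely an assumption on my part: a small stacked LSTM (num_layers layers of num_hidden units) followed by a linear head.

class MyModuleSketch(nn.Module):
    def __init__(self, num_input_features):
        super().__init__()
        num_hidden = 5
        num_layers = 2
        self.rnn = nn.LSTM(num_input_features, num_hidden, num_layers, batch_first=True)
        self.head = nn.Linear(num_hidden, 1)

    def forward(self, x):             # x: (batch, seq_len, num_input_features)
        out, _ = self.rnn(x)
        return self.head(out[:, -1])  # predict from the last time step
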
import torch
from scipy.special import sph_harm

# Converted to numpy arrays in sph_harm
def real_sph_harm(m, l, phi, theta):
    z = sph_harm(abs(m), l, phi, theta)
    if m < 0:
        z = 2**0.5 * (-1)**m * z.imag
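The preview ends inside the m < 0 branch. The real-form construction of spherical harmonics usually continues as below; this is a sketch of the standard formula, not necessarily the gist's exact code.

def real_sph_harm_sketch(m, l, phi, theta):
    z = sph_harm(abs(m), l, phi, theta)
    if m < 0:
        z = 2**0.5 * (-1)**m * z.imag   # negative orders use the imaginary part
    elif m > 0:
        z = 2**0.5 * (-1)**m * z.real   # positive orders use the real part
    else:
        z = z.real                      # m == 0 is already real
    return torch.as_tensor(z)
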