Let's suppose there are 4 commits on top of the latest master that we want to squash:
commit 1
commit 2
commit 3
commit 4
origin/master, origin/HEAD, master   <- the base commit; its SHA is what we rebase onto
- rebase on master with the SHA of the base commit, as sketched below
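A minimal sketch of the squash, where <base-SHA> stands for the SHA of the commit that origin/master points at (look it up with git log):

git rebase -i <base-SHA>

In the todo list that opens, leave "pick" on the first commit and change the other three lines to "squash" (or "s"); save and quit, then edit the combined commit message when prompted. Since this rewrites history, an already-pushed branch needs git push --force-with-lease afterwards.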
#!/usr/bin/env python
# coding: utf-8

# # [TVMConf 2020] BYOC Tutorial Demo
#
# Cody Yu ([email protected]), Zhi Chen ([email protected]) from AWS AI.
#
# This demo has two parts. In the first part, we use a simple Relay graph to walk through the BYOC workflow. In the second part, we build an SSD model with TensorRT BYOC integration to showcase a real-world application.
There are two ways of invoking pdb:
1. Run python -m pdb <application>, set a breakpoint with b <lineno>, then run c.
2. Write import pdb; pdb.set_trace() where you want the debugger to stop and run python <application> as usual; pdb will stop at that line.
Common commands are:
a. w (where) - print the stack trace
b. c (continue) - run the app until the next breakpoint
c. n (next) - execute the current line and move to the next
d. s (step) - step into the current line if it's a function call
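A minimal sketch of the second approach (the script and its names are purely illustrative):

import pdb

def divide(a, b):
    return a / b

for a, b in [(6, 3), (1, 0)]:
    pdb.set_trace()  # execution pauses here; try w, n, s, and c
    print(divide(a, b))

Running this with plain python drops into the (Pdb) prompt before each call; p a prints a variable and q quits the debugger.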
# Use C-a as the action (prefix) key, like we do in screen
#unbind C-b
set -g prefix C-a
#bind-key C-b last-window
bind-key a send-prefix        # prefix + a sends a literal C-a to the inner app
bind-key C-n next-window
bind-key C-p previous-window
# Terminal emulator window title
set -g set-titles on
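These lines belong in ~/.tmux.conf; a running server picks up changes with tmux source-file ~/.tmux.conf (or prefix + : and then source-file ~/.tmux.conf from inside tmux).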
set tabstop=8              " a hard tab displays as 8 columns
set expandtab              " insert spaces instead of tab characters
set softtabstop=4          " the Tab key inserts 4 spaces
set shiftwidth=4           " indent/outdent commands shift by 4 spaces
filetype plugin indent on  " filetype-specific plugins and indentation
set number                 " show line numbers
set backspace=indent,start " backspace over autoindent and the start of insert
set hlsearch               " highlight all search matches
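These settings go in ~/.vimrc. After a search, :nohlsearch (or :noh) clears the hlsearch highlighting until the next search.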
import torch
from torch import nn

class autoencoder(nn.Module):
    def __init__(self, downsizing_factor=None, in_channels=1):
        super(autoencoder, self).__init__()  # initialize nn.Module before assigning attributes
        self.downsize = downsizing_factor
        self.in_channels = in_channels
        # encoder conv layers get collected here; a single strided conv serves
        # as a minimal placeholder for the truncated original
        conv_modules = [nn.Conv2d(self.in_channels, 16, kernel_size=3, stride=2, padding=1)]
        self.encoder = nn.Sequential(*conv_modules)

    def forward(self, x):
        return self.encoder(x)
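A quick shape check (input sizes are arbitrary):

model = autoencoder(in_channels=1)
x = torch.randn(1, 1, 28, 28)
print(model(x).shape)  # torch.Size([1, 16, 14, 14]) with the placeholder conv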
import torch

def pf1(output, target, metric=None):
    # output and target are assumed to be binary (0/1) tensors
    d = output.detach()  # detach() replaces the deprecated .data
    t = target.detach()
    TP = torch.nonzero(d * t).size(0)              # prediction 1, target 1
    TN = torch.nonzero((d - 1) * (t - 1)).size(0)  # prediction 0, target 0
    FP = torch.nonzero(d * (t - 1)).size(0)        # prediction 1, target 0
    FN = torch.nonzero((d - 1) * t).size(0)        # prediction 0, target 1
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    F1 = 2 * precision * recall / (precision + recall)
    return precision, recall, F1
import numpy as np

def pf(output, target, metric=None):
    # output and target are assumed to be binary (0/1) arrays
    TP = np.count_nonzero(output * target)  # the parameter is output, not data
    TN = np.count_nonzero((output - 1) * (target - 1))
    FP = np.count_nonzero(output * (target - 1))
    FN = np.count_nonzero((output - 1) * target)
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    F1 = 2 * precision * recall / (precision + recall)
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    return precision, recall, F1, accuracy
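A quick sanity check with tiny hand-made binary arrays (values are illustrative):

out = np.array([1, 0, 1, 1])
tgt = np.array([1, 0, 0, 1])
print(pf(out, tgt))  # TP=2, TN=1, FP=1, FN=0 -> (0.667, 1.0, 0.8, 0.75)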