Tested on an NVIDIA RTX 4090, but these instructions also cover AMD and Mac in case you want to try those.
This guide assumes you are running Linux (I ran this on Ubuntu).
Before you get excited:
def verify_ddp_weights_equal(model: torch.nn.Module, atol: float = 1e-5) -> None:
    """Assert that model parameters are identical across all DDP ranks.

    Unwraps a DistributedDataParallel-wrapped model, gathers every
    parameter from all ranks, and compares each rank's copy against
    rank 0's copy element-wise.

    Args:
        model: the (possibly DDP-wrapped) model to check.
        atol: maximum allowed absolute difference between ranks.

    Raises:
        AssertionError: if any parameter differs across ranks by more
            than ``atol``.
    """
    # DDP wraps the underlying model under `.module`; unwrap if present.
    if hasattr(model, "module"):
        model = model.module

    # NOTE(review): get_world_size/gather are project helpers — presumably
    # thin wrappers over torch.distributed; confirm against their definitions.
    world_size = get_world_size()
    for name, param in model.named_parameters():
        # Assumes gather() returns this param's values from every rank,
        # so the flattened result reshapes to (world_size, numel).
        gathered_param = gather(param).reshape((world_size, -1))
        # Broadcast rank 0's copy (shape (1, numel)) against all ranks'
        # copies (shape (world_size, numel)).
        absolute_diffs = (gathered_param[None, 0, :] - gathered_param).abs()
        rank_params_eq = (absolute_diffs < atol).all()
        assert rank_params_eq, f"❌ param [{name}] not equal - got max_absolute_diff={absolute_diffs.max()}"
// SPDX-License-Identifier: MIT | |
pragma solidity ^0.8.22; | |
import {wadLn, unsafeWadMul, unsafeWadDiv} from "solmate/utils/SignedWadMath.sol"; | |
/// @notice Approximated principal branch of [Lambert W function](https://en.wikipedia.org/wiki/Lambert_W_function) | |
/// @dev Only supports the [1/e, 3+1/e] and [3+1/e, inf] interval | |
/// @dev Approximate [1/e, 3+1/e] with a lookup table weighted average | |
/// @dev Approximate and [3+1/e, inf] with ln(x) - ln(ln(x)) + ln(ln(x))/ln(x) | |
contract Lambert { |
import traceback | |
import openai | |
import sys | |
# List the models available to this API key.
# NOTE(review): openai.Model.list() is the legacy pre-1.0 openai-python API
# (removed in openai>=1.0) — confirm the pinned package version.
models = openai.Model.list()
def baka(error, character="tsundere",): | |
exc_type, exc_value, exc_traceback = sys.exc_info() | |
traceback_list = traceback.extract_tb(exc_traceback) |
Audience: I assume you have heard of chatGPT, maybe played with it a little, and were impressed by it (or tried very hard not to be). And that you also heard that it is "a large language model". And maybe that it "solved natural language understanding". Here is a short personal perspective of my thoughts of this (and similar) models, and where we stand with respect to language understanding.
Around 2014-2017, right within the rise of neural-network based methods for NLP, I was giving a semi-academic-semi-popsci lecture, revolving around the story that achieving perfect language modeling is equivalent to being as intelligent as a human. Somewhere around the same time I was also asked in an academic panel "what would you do if you were given infinite compute and no need to worry about labour costs" to which I cockily responded "I would train a really huge language model, just to show that it doesn't solve everything!". We
# GitHub Actions job: build a Foundry coverage report on Ubuntu.
coverage_report:
  name: Generate coverage report
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v2
    # Install the nightly Foundry toolchain (forge/cast).
    - name: Install Foundry
      uses: onbjerg/foundry-toolchain@v1
      with:
        version: nightly
// SPDX-License-Identifier: MIT | |
pragma solidity ^0.8.13; | |
library InsertionSort { | |
function sort(uint256[] memory list) internal pure { | |
// Algorithm: | |
// | |
// for i = 2 to n do | |
// for j = 1 to i − 1 do // NOTE: we init do an extra sub instead of <= | |
// if A[i] < A[j] then |
// UNTESTED! | |
object "BribeContract" { | |
code { | |
let size := datasize("BribeRuntime") | |
codecopy(0, dataoffset("BribeRuntime"), size) | |
return(0, size) | |
} | |
object "BribeRuntime" { | |
code { | |
// Bribe by sending ETH. To try and claim, send a tx without ETH. Assumes that a tx with |
pragma circom 2.0.0; | |
include "../node_modules/circomlib/circuits/mimcsponge.circom"; | |
template attestValidMove () { | |
signal input move; | |
signal input secret; | |
signal output moveAttestation; | |
component mimcAttestation = MiMCSponge(2, 220, 1); |
Crash course on smart contract auditing for vulnerability researchers
NOTE: This blog post has been moved here