This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Third-party imports for SHAP-based explanation of a RoBERTa transformers pipeline.
import numpy as np
import pandas as pd
import scipy as sp
import shap
import torch
from numpy import argmax
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer, pipeline

# import datasets  # unused; kept for reference
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Stimuli for the lexical-decision task: each entry maps a trial number to
# the word to display and its (x, y) position on the simulated screen.
words = [{1: {'text': 'elephant', 'position': (320, 180)}},
         {2: {'text': 'wug', 'position': (220, 140)}},
         {3: {'text': 'dog', 'position': (320, 180)}}]
import pyactr as actr

# ACT-R simulation environment with the visual focus at the screen origin.
environment = actr.Environment(focus_position=(0,0))
lex_decision = actr.ACTRModel( | |
environment=environment, | |
automatic_visual_search=False, |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import spacy
import numpy as np

# English pipeline with the named-entity recognizer disabled (not needed here).
# NOTE(review): the bare 'en' shortcut only works on spaCy v2; v3+ installs
# need spacy.load('en_core_web_sm', disable=['ner']) — confirm target version.
nlp = spacy.load('en', disable=['ner'])
def tree_height(root): | |
""" | |
Find the maximum depth (height) of the dependency parse of a spacy sentence by starting with its root | |
Code adapted from https://stackoverflow.com/questions/35920826/how-to-find-height-for-non-binary-tree |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Usage: | |
gradeEmailer.py [--name=NAME] [--password=PASSWORD] [--dry-run] <sender-email> <grades-path> | |
Options: | |
--name=NAME The name of the grader. | |
Used in the signature and From field of the email. | |
If not given, <sender-email> will be used. | |
--password=PASSWORD The password to the sender's email account. | |
If not given, it will be prompted for. | |
--dry-run To test, send emails to yourself instead of to the students. |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def cohortModel(word, EnglishWords):
    """Simulate the cohort model of spoken word recognition.

    "Hears" *word* one letter at a time and, after each letter, prunes the
    candidate set to the known words consistent with the input so far,
    printing the surviving candidates at every step.

    Parameters
    ----------
    word : str
        The word being heard, processed letter by letter.
    EnglishWords : iterable of str
        The listener's lexicon.

    Returns
    -------
    set of str
        The candidates still consistent with the full input (all of
        ``EnglishWords`` if *word* is empty).
    """
    soFar = ''  # nothing has been heard yet
    candidates = set(EnglishWords)  # initially, every known word is possible
    for letter in word:
        soFar += letter  # extend the prefix heard so far
        # Iterate over a copy so we can safely remove from `candidates`.
        # The loop variable is renamed: the original shadowed the `word`
        # parameter, which only worked because the outer iterator was
        # already bound.
        for candidate in set(candidates):
            if not candidate.startswith(soFar):  # inconsistent with input
                candidates.remove(candidate)
        print("These are the possible words when we've heard {} so far:\n{}".format(str(soFar), str(candidates)))
    return candidates
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
This is a script I wrote to help me select office hours for my spring 2016 TAship of | |
COGS 2201: Foundations in Cognitive Science at the University of Connecticut. | |
""" | |
# import a few packages that we'll need to structure the doodle data, change the working directory, | |
# and compute the pairs of dates/times. | |
import pandas as pd | |
import os, itertools |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
With 93/120/9 FFN with sigmoid hidden layer, and fixing the activations on | |
the output layer (make them sum to one, put on interval 0-1), and train for | |
5 epochs, only get score of 1.28 | |
""" | |
import pandas as pd | |
import os | |
from pybrain.tools.shortcuts import buildNetwork | |
from pybrain.supervised.trainers import BackpropTrainer |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Natural Language Toolkit: code_baseline_tagger | |
# functions from http://www.nltk.org/book/ch05.html | |
from nltk.corpus import brown | |
import nltk | |
def performance(cfd, wordlist):
    """Score a lookup tagger built from each word's most frequent tag.

    Parameters
    ----------
    cfd : nltk.ConditionalFreqDist
        Word -> tag frequency distribution (e.g. built from Brown).
    wordlist : iterable of str
        Words to include in the lookup model.

    Returns
    -------
    float
        Accuracy of the tagger on the Brown 'news' tagged sentences,
        backing off to the default tag 'NN' for words not in the model.
    """
    # Most likely tag for each word in the list.
    lt = {word: cfd[word].max() for word in wordlist}
    baseline_tagger = nltk.UnigramTagger(model=lt, backoff=nltk.DefaultTagger('NN'))
    # NOTE(review): evaluate() was renamed accuracy() in newer NLTK releases —
    # confirm the installed version still supports it.
    return baseline_tagger.evaluate(brown.tagged_sents(categories='news'))