#include "Python.h" | |
#include "VariableType.h" | |
// generated from tools/autograd/templates/VariableType.cpp | |
#include "torch/csrc/autograd/variable.h" | |
#include "torch/csrc/autograd/function.h" | |
#include "torch/csrc/autograd/grad_mode.h" | |
#include "torch/csrc/autograd/saved_variable.h" | |
#include "torch/csrc/autograd/generated/Functions.h" | |
#include "torch/csrc/autograd/functions/tensor.h" | |
#include "torch/csrc/autograd/functions/basic_ops.h" | |
#include "torch/csrc/jit/tracer.h" | |
#include <initializer_list> | |
#include <iostream> | |
#include <functional> | |
#ifdef _MSC_VER | |
#ifdef Type | |
#undef Type | |
#endif | |
#endif | |
using namespace at; | |
using namespace torch::autograd::generated; | |
namespace torch { namespace autograd { | |

// Helper methods for working with Attributes (torch/csrc/jit/attributes.h).
// The overloaded setters let the generated code call a single setattr() per
// attribute and rely on overload resolution, so the codegen does not have to
// dispatch on the attribute type manually.
static void setattr(jit::Node* n, jit::Symbol name, int64_t v) { n->i_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, const at::Scalar& v) { n->t_(name, v.toTensor()); }
static void setattr(jit::Node* n, jit::Symbol name, SparseTensor s) { n->t_(name, s.tref); }
static void setattr(jit::Node* n, jit::Symbol name, const at::IntList& v) { n->is_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, bool v) { n->i_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, double v) { n->f_(name, v); }

// Note: std::array's size parameter is std::size_t, so the template parameter
// must be std::size_t as well for deduction to work portably (e.g. on MSVC).
template<std::size_t N>
static void setattr(jit::Node* n, jit::Symbol name, std::array<bool, N> v) { n->is_(name, std::vector<int64_t>(v.begin(), v.end())); }
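
// Illustrative sketch (not itself generated code): each traced method below
// records a trace node and then calls setattr once per non-Tensor argument,
// letting overload resolution pick the matching jit::Node setter. For example,
// transpose() further down does the equivalent of:
//
//   jit::Node* n = jit::tracer::recordTrace("transpose", { self }, { ret });
//   setattr(n, jit::stringToSymbol("dim0"), dim0);  // int64_t -> Node::i_
//   setattr(n, jit::stringToSymbol("dim1"), dim1);  // int64_t -> Node::i_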
VariableType::VariableType(Context* context, Type* baseType) | |
: Type(context) | |
, baseType(baseType) { | |
str = std::string("Variable[") + baseType->toString() + "]"; | |
} | |
ScalarType VariableType::scalarType() const { | |
return baseType->scalarType(); | |
} | |
Backend VariableType::backend() const { | |
return baseType->backend(); | |
} | |
bool VariableType::is_cuda() const { return baseType->is_cuda(); } | |
bool VariableType::is_sparse() const { return baseType->is_sparse(); } | |
bool VariableType::is_distributed() const { return baseType->is_distributed(); } | |
std::unique_ptr<Storage> VariableType::storage() const { | |
return baseType->storage(); | |
} | |
std::unique_ptr<Storage> VariableType::storage(size_t size) const { | |
return baseType->storage(size); | |
} | |
std::unique_ptr<Storage> VariableType::storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const { | |
return baseType->storageFromBlob(data, size, deleter); | |
} | |
std::unique_ptr<Storage> VariableType::unsafeStorageFromTH(void * th_pointer, bool retain) const { | |
return baseType->unsafeStorageFromTH(th_pointer, retain); | |
} | |
std::unique_ptr<Storage> VariableType::storageWithAllocator(int64_t size, std::unique_ptr<Allocator> allocator) const { | |
return baseType->storageWithAllocator(size, std::move(allocator)); | |
} | |
Tensor VariableType::unsafeTensorFromTH(void * th_pointer, bool retain) const { | |
return make_variable(baseType->unsafeTensorFromTH(th_pointer, retain), false); | |
} | |
std::unique_ptr<Generator> VariableType::generator() const { | |
return baseType->generator(); | |
} | |
const char * VariableType::toString() const { | |
return str.c_str(); | |
} | |
size_t VariableType::elementSizeInBytes() const { | |
return baseType->elementSizeInBytes(); | |
} | |
Type & VariableType::toBackend(Backend b) const { | |
return *VariableImpl::getType(baseType->toBackend(b)); | |
} | |
Type & VariableType::toScalarType(ScalarType s) const { | |
return *VariableImpl::getType(baseType->toScalarType(s)); | |
} | |
TypeID VariableType::ID() const { | |
throw std::runtime_error("VariableType::ID() not implemented"); | |
} | |
const char * VariableType::typeString() { | |
return "VariableType"; | |
} | |
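
// The helpers below unwrap the Variable arguments that callers pass in as
// plain Tensors: checked_cast verifies that an argument is defined and has the
// expected Variable type, and the unpack* variants return the underlying data
// tensor (unpack_long/unpack_byte expect index/mask tensors, unpack_opt allows
// an undefined tensor, and the TensorList overloads handle whole lists).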
Variable & VariableType::checked_cast(const Type & type, const Tensor & t, const char * name, int pos) { | |
if(!t.defined()) { | |
runtime_error("Expected a Tensor of type %s but found an undefined Tensor for argument #%d '%s'", | |
type.toString(), pos, name); | |
} | |
if (&t.type() != &type && &t.type() != &type.toBackend(toSparse(t.type().backend()))) { | |
runtime_error("Expected object of type %s but found type %s for argument #%d '%s'", | |
type.toString(), t.type().toString(), pos, name); | |
} | |
return static_cast<Variable&>(const_cast<Tensor&>(t)); | |
} | |
Tensor & VariableType::unpack(const Tensor & t, const char * name, int pos) const { | |
return checked_cast(*this, t, name, pos).data(); | |
} | |
SparseTensor VariableType::unpack(SparseTensor t, const char * name, int pos) const { | |
auto backend = is_cuda() ? kSparseCUDA : kSparseCPU; | |
return SparseTensor(checked_cast(this->toBackend(backend), t.tref, name, pos).data()); | |
} | |
Tensor & VariableType::unpack_long(const Tensor & t, const char * name, int pos) const { | |
auto& type = *VariableImpl::getType(baseType->toScalarType(kLong)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor & VariableType::unpack_byte(const Tensor & t, const char * name, int pos) const { | |
auto& type = *VariableImpl::getType(baseType->toScalarType(kByte)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor & VariableType::unpack_any(const Tensor & t, const char * name, int pos) const { | |
if (!t.defined()) { | |
runtime_error("Expected a Tensor of type Variable but found an undefined Tensor for argument #%d '%s'", | |
pos, name); | |
} | |
auto scalarType = t.type().scalarType(); | |
auto backend = t.type().backend(); | |
auto& type = *VariableImpl::getType(baseType->toScalarType(scalarType).toBackend(backend)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor VariableType::unpack_opt(const Tensor & t, const char * name, int pos) const { | |
if(!t.defined()) { | |
return Tensor(); | |
} | |
return unpack(t, name, pos); | |
} | |
std::vector<at::Tensor> VariableType::unpack(at::TensorList tl, const char *name, int pos) const { | |
std::vector<at::Tensor> ret(tl.size()); | |
for (size_t i = 0; i < tl.size(); ++i) { | |
const auto &t = tl[i]; | |
if (!t.defined()) { | |
runtime_error("Expected a Tensor of type %s but found an undefined Tensor at position #%d " | |
"for iterable argument #%d '%s'", | |
toString(), i, pos, name); | |
} | |
if (&t.type() == this) { | |
ret[i] = static_cast<const Variable&>(t).data(); | |
} else { | |
runtime_error("Expected object of type %s but found type %s at position #%d " | |
"for iterable argument #%d '%s'", | |
toString(),t.type().toString(), i, pos, name); | |
} | |
} | |
return ret; | |
} | |
std::vector<at::Tensor> VariableType::unpack_idxs(at::TensorList tl, const char *name, int pos) const { | |
auto& longType = *VariableImpl::getType(baseType->toScalarType(kLong)); | |
auto& byteType = *VariableImpl::getType(baseType->toScalarType(kByte)); | |
std::vector<at::Tensor> ret(tl.size()); | |
for (size_t i = 0; i < tl.size(); ++i) { | |
const auto &t = tl[i]; | |
if (!t.defined()) { | |
continue; | |
} else if (!(t.type() == longType || t.type() == byteType)) { | |
runtime_error("Expected object of type %s or %s but found type %s at position #%d " | |
"for iterable argument #%d '%s'", | |
longType.toString(), byteType.toString(), t.type().toString(), | |
i, pos, name); | |
} else { | |
ret[i] = static_cast<const Variable&>(t).data(); | |
} | |
} | |
return ret; | |
} | |
static Variable as_variable(Tensor tensor) { | |
return make_variable(std::move(tensor)); | |
} | |
static std::tuple<Variable, Variable> | |
as_variable(std::tuple<Tensor, Tensor> tensors) { | |
return std::make_tuple<>( | |
make_variable(std::move(std::get<0>(tensors))), | |
make_variable(std::move(std::get<1>(tensors)))); | |
} | |
static std::tuple<Variable, Variable, Variable> | |
as_variable(std::tuple<Tensor, Tensor, Tensor> tensors) { | |
return std::make_tuple<>( | |
make_variable(std::move(std::get<0>(tensors))), | |
make_variable(std::move(std::get<1>(tensors))), | |
make_variable(std::move(std::get<2>(tensors)))); | |
} | |
static std::tuple<Variable, Variable, Variable, Variable> | |
as_variable(std::tuple<Tensor, Tensor, Tensor, Tensor> tensors) { | |
return std::make_tuple<>( | |
make_variable(std::move(std::get<0>(tensors))), | |
make_variable(std::move(std::get<1>(tensors))), | |
make_variable(std::move(std::get<2>(tensors))), | |
make_variable(std::move(std::get<3>(tensors)))); | |
} | |
static std::vector<Variable> as_variable(TensorList tl) { | |
std::vector<Variable> variables; | |
for (auto& t : tl) { | |
variables.emplace_back(make_variable(std::move(t))); | |
} | |
return variables; | |
} | |
static Variable as_view(Variable base, Tensor tensor) { | |
if (base.is_view()) { | |
base = base.base(); | |
} | |
return make_variable_view(std::move(base), std::move(tensor)); | |
} | |
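
// Presumably needed because Variables at this point do not support 0-dim
// (scalar) tensors: an in-place op whose result would be 0-dimensional is
// reshaped to a one-element tensor instead.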
static void ensure_no_aten_scalars(Tensor & data) { | |
if (data.defined() && data.dim() == 0) { | |
data.as_strided_({1}, {1}); | |
} | |
} | |
template<typename T> | |
static bool computes_grad_tmpl(T tensors) { | |
if (!GradMode::is_enabled()) { | |
return false; | |
} | |
for (const Tensor& tensor : tensors) { | |
auto& var = static_cast<const Variable&>(tensor); | |
if (var.defined() && var.requires_grad()) { | |
return true; | |
} | |
} | |
return false; | |
} | |
using TensorRef = std::reference_wrapper<const Tensor>;
using TensorRefList = std::initializer_list<TensorRef>;

// ArrayRef is not covariant, which means there is no implicit conversion
// between TensorList (aka ArrayRef<Tensor>) and ArrayRef<Variable>. What we
// do instead is manually construct a variable_list, which itself is
// implicitly convertible into an ArrayRef<Variable> (but don't return an
// ArrayRef<Variable>; ArrayRef is non-owning!)
static variable_list cast_tensor_list(const TensorList& tensors) {
  // TODO: Eliminate the intermediate vector allocation
  return variable_list(tensors.begin(), tensors.end());
}
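
// Minimal sketch of the conversion described above (the function names here
// are hypothetical): a TensorList of Variables cannot be passed directly where
// an ArrayRef<Variable> is expected, but the owning variable_list converts
// implicitly.
//
//   void takes_variables(at::ArrayRef<Variable> vars);
//
//   void caller(at::TensorList tensors) {
//     // takes_variables(tensors);                // error: ArrayRef is not covariant
//     takes_variables(cast_tensor_list(tensors)); // ok: variable_list is owning
//   }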
static bool compute_requires_grad(const TensorRefList& tensors) { | |
return computes_grad_tmpl(tensors); | |
} | |
static bool compute_requires_grad(TensorList tensors) { | |
return computes_grad_tmpl(tensors); | |
} | |
static void check_no_requires_grad(const Tensor& tensor, const char* name) { | |
auto& var = static_cast<const Variable&>(tensor); | |
if (var.defined() && var.requires_grad()) { | |
std::string msg = "the derivative for '"; | |
msg += name; | |
msg += "' is not implemented"; | |
throw std::runtime_error(msg); | |
} | |
} | |
static function_list compute_next_functions(const std::initializer_list<Tensor>& tensors) { | |
return Function::flags(tensors).next_functions; | |
} | |
static function_list compute_next_functions(TensorList tensors) { | |
return Function::flags(tensors).next_functions; | |
} | |
static void check_inplace(const Tensor& tensor) { | |
auto& var = static_cast<const Variable&>(tensor); | |
if (var.requires_grad() && var.is_leaf() && GradMode::is_enabled()) { | |
at::runtime_error( | |
"a leaf Variable that requires grad has been used in an in-place operation."); | |
} | |
} | |
static void rebase_history(Variable& var, std::shared_ptr<Function> grad_fn, int output_nr=0) { | |
if (!var.defined()) { | |
return; | |
} | |
if (grad_fn) { | |
grad_fn->num_inputs = 1; | |
var.rebase_history(output_nr, std::move(grad_fn)); | |
} | |
} | |
// var must be the only differentiable output of the function. Use the ArrayRef | |
// overload for functions with multiple differentiable outputs. | |
static void set_history(Variable& var, std::shared_ptr<Function> grad_fn, int output_nr=0) { | |
if (grad_fn) { | |
grad_fn->num_inputs = 1; | |
var.get()->output_nr = output_nr; | |
var.get()->_grad_fn = std::move(grad_fn); | |
} | |
} | |
static void set_history(at::ArrayRef<Variable> vl, std::shared_ptr<Function> grad_fn) { | |
if (grad_fn) { | |
grad_fn->num_inputs = vl.size(); | |
int64_t output_nr = 0; | |
for (auto& var : vl) { | |
if (!var.defined()) continue; | |
// TODO: combine this with the Variable construction | |
var.get()->output_nr = output_nr; | |
var.get()->_grad_fn = grad_fn; | |
output_nr++; | |
} | |
} | |
} | |
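
// The generated methods below follow the rule above: out-of-place ops with a
// single differentiable output call set_history(ret, grad_fn) (the comparison
// ops use the ArrayRef overload with a one-element list), while in-place ops
// typically call rebase_history on self after mutating it.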
static variable_list flatten(const TensorList& tensors) { | |
return cast_tensor_list(tensors); | |
} | |
static variable_list flatten(const Tensor& x, const TensorList& y) { | |
std::vector<Variable> r; | |
r.reserve(1 + y.size()); | |
r.emplace_back(x); | |
r.insert(r.end(), y.begin(), y.end()); | |
return r; | |
} | |
static variable_list flatten(const Tensor& x, const TensorList& y, const Tensor& z) { | |
std::vector<Variable> r; | |
r.reserve(2 + y.size()); | |
r.emplace_back(x); | |
r.insert(r.end(), y.begin(), y.end()); | |
r.emplace_back(z); | |
return r; | |
} | |
static std::vector<Tensor> as_tensor_list(std::vector<Variable> &vars) { | |
std::vector<Tensor> tensors; | |
for (auto& v : vars) { | |
tensors.emplace_back(std::move(v)); | |
} | |
return tensors; | |
} | |
static void increment_version(const Tensor & t) { | |
auto& var = static_cast<const Variable&>(t); | |
var.version_counter().increment(); | |
} | |
static bool isFloatingPoint(ScalarType s) { | |
return s == kFloat || s == kDouble || s == kHalf; | |
} | |
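
// The generated methods that follow share a common shape: unpack the Variable
// arguments, check_inplace the target for in-place ops, build the matching
// *Backward node when gradients are required, dispatch to baseType, bump the
// version counter of any mutated tensor, hook up the autograd history via
// rebase_history/set_history, and record a JIT trace node when tracing.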
Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool async) const { | |
// TODO: once copy is exposed in Declarations.yaml we may be able to bind | |
// it automatically | |
auto& self_ = unpack(self, "self", 0); | |
auto& src_ = unpack_any(src, "src", 1); | |
check_inplace(self); | |
std::shared_ptr<CopyBackwards> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, src }); | |
requires_grad &= isFloatingPoint(self.type().scalarType()); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CopyBackwards>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->num_inputs = 1; | |
grad_fn->src_type = &src.type(); | |
grad_fn->src_device = src.is_cuda() ? src.get_device() : -1; | |
} | |
baseType->s_copy_(self_, src_, async); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), std::move(grad_fn)); | |
return self; | |
} | |
Tensor & VariableType::resize_(Tensor & self, IntList size) const { | |
auto& self_ = unpack(self, "self", 0); | |
if (static_cast<Variable&>(self).requires_grad()) { | |
at::runtime_error("cannot resize variables that require grad"); | |
} | |
baseType->resize_(self_, size); | |
return self; | |
} | |
Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) const { | |
return resize_(self, the_template.sizes()); | |
} | |
Tensor VariableType::contiguous(const Tensor & self) const { | |
unpack(self, "self", 0); | |
if (self.is_contiguous()) { | |
return self; | |
} | |
return self.clone(); | |
} | |
static std::vector<int64_t> to_arg_sizes(TensorList tensors, int64_t dim) { | |
std::vector<int64_t> arg_sizes(tensors.size()); | |
for (size_t i = 0; i < tensors.size(); ++i) { | |
arg_sizes[i] = tensors[i].size(dim); | |
} | |
return arg_sizes; | |
} | |
int64_t VariableType::storage_offset(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->storage_offset(self_); | |
} | |
Tensor VariableType::zeros(IntList size) const { | |
return as_variable(baseType->zeros(size)); | |
} | |
Tensor VariableType::zeros_like(const Tensor & input) const { | |
auto& input_ = unpack(input, "input", 0); | |
return as_variable(baseType->zeros_like(input_)); | |
} | |
Tensor VariableType::ones(IntList size) const { | |
return as_variable(baseType->ones(size)); | |
} | |
Tensor VariableType::ones_like(const Tensor & input) const { | |
auto& input_ = unpack(input, "input", 0); | |
return as_variable(baseType->ones_like(input_)); | |
} | |
int64_t VariableType::numel(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->numel(self_); | |
} | |
Tensor & VariableType::set_(Tensor & self, Storage & storage) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->set_(self_, storage); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self, Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->set_(self_, sourceStorage, storage_offset, size, stride); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self, const Tensor & source) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& source_ = unpack(source, "source", 1); | |
baseType->set_(self_, source_); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->set_(self_); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::fill_(Tensor & self, Scalar value) const { | |
profiler::RecordFunction profiler("fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FillBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->fill_(self_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fill", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::fill_(Tensor & self, const Tensor & value) const { | |
throw std::runtime_error("VariableType::fill_ NYI"); | |
} | |
bool VariableType::is_contiguous(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_contiguous(self_); | |
} | |
bool VariableType::is_set_to(const Tensor & self, const Tensor & tensor) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor_ = unpack(tensor, "tensor", 1); | |
return baseType->is_set_to(self_, tensor_); | |
} | |
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) const { | |
profiler::RecordFunction profiler("masked_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
check_inplace(self); | |
std::shared_ptr<MaskedFillBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaskedFillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->mask_ = SavedVariable(mask, false); | |
} | |
baseType->s_masked_fill_(self_, mask_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, mask )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_fill", { self, mask }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value) const { | |
throw std::runtime_error("VariableType::masked_fill_ NYI"); | |
} | |
Tensor & VariableType::s_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) const { | |
profiler::RecordFunction profiler("masked_scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
auto& source_ = unpack(source, "source", 2); | |
check_inplace(self); | |
std::shared_ptr<MaskedScatterBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, source }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaskedScatterBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->mask_ = SavedVariable(mask, false); | |
grad_fn->source_sizes = source.sizes(); | |
} | |
baseType->s_masked_scatter_(self_, mask_, source_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, mask, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_scatter", { self, mask, source }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_masked_select(const Tensor & self, const Tensor & mask) const { | |
profiler::RecordFunction profiler("masked_select"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
std::shared_ptr<MaskedSelectBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaskedSelectBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
grad_fn->mask_ = SavedVariable(mask, false); | |
} | |
auto ret = as_variable(baseType->s_masked_select(self_, mask_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, mask )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_select", { self, mask }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::transpose(const Tensor & self, int64_t dim0, int64_t dim1) const { | |
profiler::RecordFunction profiler("transpose"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TransposeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim0 = dim0; | |
grad_fn->dim1 = dim1; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->transpose(self_, dim0, dim1)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim0"), dim0); | |
setattr(n, jit::stringToSymbol("dim1"), dim1); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::transpose_(Tensor & self, int64_t dim0, int64_t dim1) const { | |
profiler::RecordFunction profiler("transpose_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TransposeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim0 = dim0; | |
grad_fn->dim1 = dim1; | |
} | |
baseType->transpose_(self_, dim0, dim1); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim0"), dim0); | |
setattr(n, jit::stringToSymbol("dim1"), dim1); | |
} | |
return self; | |
} | |
Tensor VariableType::t(const Tensor & self) const { | |
profiler::RecordFunction profiler("t"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->t(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "t", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::t_(Tensor & self) const { | |
profiler::RecordFunction profiler("t_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->t_(self_); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "t", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::nonzero(const Tensor & self) const { | |
profiler::RecordFunction profiler("nonzero"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NonzeroBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NonzeroBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->nonzero(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "nonzero", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::clone(const Tensor & self) const { | |
profiler::RecordFunction profiler("clone"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CloneBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CloneBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->clone(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clone", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::view(const Tensor & self, IntList size) const { | |
profiler::RecordFunction profiler("view"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ViewBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ViewBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->view(self_, size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "view", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::index_select(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("index_select"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
std::shared_ptr<IndexSelectBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<IndexSelectBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto ret = as_variable(baseType->index_select(self_, dim, index_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_select", { self, index }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const { | |
profiler::RecordFunction profiler("index_copy_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& source_ = unpack(source, "source", 3); | |
check_inplace(self); | |
std::shared_ptr<IndexCopyBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, source }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<IndexCopyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_copy_(self_, dim, index_, source_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_copy", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::take(const Tensor & self, const Tensor & index) const { | |
profiler::RecordFunction profiler("take"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 1); | |
std::shared_ptr<TakeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TakeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto ret = as_variable(baseType->take(self_, index_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "take", { self, index }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) const { | |
profiler::RecordFunction profiler("put_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 1); | |
auto& source_ = unpack(source, "source", 2); | |
check_inplace(self); | |
std::shared_ptr<PutBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, source }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PutBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->index_ = SavedVariable(index, false); | |
grad_fn->source_info = source; | |
grad_fn->accumulate = accumulate; | |
} | |
baseType->put_(self_, index_, source_, accumulate); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "put", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("accumulate"), accumulate); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const { | |
profiler::RecordFunction profiler("index_add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& source_ = unpack(source, "source", 3); | |
check_inplace(self); | |
std::shared_ptr<IndexAddBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, source }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<IndexAddBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_add_(self_, dim, index_, source_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_add", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) const { | |
profiler::RecordFunction profiler("index_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
check_inplace(self); | |
std::shared_ptr<IndexFillBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<IndexFillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_fill_(self_, dim, index_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_fill", { self, index }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) const { | |
throw std::runtime_error("VariableType::index_fill_ NYI"); | |
} | |
Tensor VariableType::unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) const { | |
profiler::RecordFunction profiler("unfold"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UnfoldBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UnfoldBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dimension = dimension; | |
grad_fn->size = size; | |
grad_fn->step = step; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->unfold(self_, dimension, size, step)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unfold", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dimension"), dimension); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("step"), step); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::range(Scalar start, Scalar end, Scalar step) const { | |
return as_variable(baseType->range(start, end, step)); | |
} | |
Tensor VariableType::arange(Scalar start, Scalar end, Scalar step) const { | |
return as_variable(baseType->arange(start, end, step)); | |
} | |
Tensor VariableType::arange(Scalar end) const { | |
return as_variable(baseType->arange(end)); | |
} | |
Tensor & VariableType::scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) const { | |
profiler::RecordFunction profiler("scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& src_ = unpack(src, "src", 3); | |
check_inplace(self); | |
std::shared_ptr<ScatterBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, src }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ScatterBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_(self_, dim, index_, src_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index, src )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index, src }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor & VariableType::scatter_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) const { | |
profiler::RecordFunction profiler("scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
check_inplace(self); | |
std::shared_ptr<ScatterBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ScatterBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_(self_, dim, index_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) const { | |
profiler::RecordFunction profiler("scatter_add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& src_ = unpack(src, "src", 3); | |
check_inplace(self); | |
std::shared_ptr<ScatterAddBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, src }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ScatterAddBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_add_(self_, dim, index_, src_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, index, src )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter_add", { self, index, src }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::gather(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("gather"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
std::shared_ptr<GatherBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GatherBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto ret = as_variable(baseType->gather(self_, dim, index_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "gather", { self, index }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
void* VariableType::data_ptr(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->data_ptr(self_); | |
} | |
bool VariableType::equal(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return baseType->equal(self_, other_); | |
} | |
Tensor VariableType::__and__(const Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->__and__(self_, other)); | |
} | |
Tensor VariableType::s___and__(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return as_variable(baseType->s___and__(self_, other_)); | |
} | |
Tensor & VariableType::__iand__(Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__iand__(self_, other); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::s___iand__(Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___iand__(self_, other_); | |
increment_version(self); | |
return self; | |
} | |
Tensor VariableType::__or__(const Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->__or__(self_, other)); | |
} | |
Tensor VariableType::s___or__(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return as_variable(baseType->s___or__(self_, other_)); | |
} | |
Tensor & VariableType::__ior__(Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ior__(self_, other); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::s___ior__(Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ior__(self_, other_); | |
increment_version(self); | |
return self; | |
} | |
Tensor VariableType::__xor__(const Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->__xor__(self_, other)); | |
} | |
Tensor VariableType::s___xor__(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return as_variable(baseType->s___xor__(self_, other_)); | |
} | |
Tensor & VariableType::__ixor__(Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ixor__(self_, other); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::s___ixor__(Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ixor__(self_, other_); | |
increment_version(self); | |
return self; | |
} | |
Tensor VariableType::__lshift__(const Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->__lshift__(self_, other)); | |
} | |
Tensor VariableType::s___lshift__(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return as_variable(baseType->s___lshift__(self_, other_)); | |
} | |
Tensor & VariableType::__ilshift__(Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ilshift__(self_, other); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::s___ilshift__(Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ilshift__(self_, other_); | |
increment_version(self); | |
return self; | |
} | |
Tensor VariableType::__rshift__(const Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->__rshift__(self_, other)); | |
} | |
Tensor VariableType::s___rshift__(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return as_variable(baseType->s___rshift__(self_, other_)); | |
} | |
Tensor & VariableType::__irshift__(Tensor & self, Scalar other) const { | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__irshift__(self_, other); | |
increment_version(self); | |
return self; | |
} | |
Tensor & VariableType::s___irshift__(Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___irshift__(self_, other_); | |
increment_version(self); | |
return self; | |
} | |
Tensor VariableType::lt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LtBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->lt(self_, other)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_lt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<LtBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->s_lt(self_, other_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::lt_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("lt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LtBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->lt_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_lt_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("lt_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<LtBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_lt_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::gt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GtBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->gt(self_, other)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_gt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<GtBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->s_gt(self_, other_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::gt_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("gt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GtBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->gt_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_gt_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("gt_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<GtBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_gt_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::le(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->le(self_, other)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_le(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<LeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->s_le(self_, other_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::le_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("le_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->le_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_le_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("le_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<LeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_le_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::ge(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->ge(self_, other)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_ge(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<GeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->s_ge(self_, other_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::ge_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ge_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->ge_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_ge_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ge_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<GeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_ge_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::eq(const Tensor & self, Scalar other) const {
  profiler::RecordFunction profiler("eq");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<EqBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<EqBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_info = self;
  }
  auto ret = as_variable(baseType->eq(self_, other));
  set_history({ ret }, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { ret } );
    setattr(n, jit::stringToSymbol("other"), other);
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::s_eq(const Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("eq");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<EqBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<EqBackward1>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->other_info = other;
    grad_fn->self_info = self;
  }
  auto ret = as_variable(baseType->s_eq(self_, other_));
  set_history({ ret }, grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor & VariableType::eq_(Tensor & self, Scalar other) const {
  profiler::RecordFunction profiler("eq_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<EqBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<EqBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_info = self;
  }
  baseType->eq_(self_, other);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { self } );
    setattr(n, jit::stringToSymbol("other"), other);
  }
  return self;
}
Tensor & VariableType::s_eq_(Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("eq_");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  check_inplace(self);
  std::shared_ptr<EqBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<EqBackward1>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->other_info = other;
    grad_fn->self_info = self;
  }
  baseType->s_eq_(self_, other_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { self } );
    (void)n;
  }
  return self;
}
Tensor VariableType::ne(const Tensor & self, Scalar other) const {
  profiler::RecordFunction profiler("ne");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<NeBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<NeBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_info = self;
  }
  auto ret = as_variable(baseType->ne(self_, other));
  set_history({ ret }, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { ret } );
    setattr(n, jit::stringToSymbol("other"), other);
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::s_ne(const Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("ne");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<NeBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<NeBackward1>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->other_info = other;
    grad_fn->self_info = self;
  }
  auto ret = as_variable(baseType->s_ne(self_, other_));
  set_history({ ret }, grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor & VariableType::ne_(Tensor & self, Scalar other) const {
  profiler::RecordFunction profiler("ne_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<NeBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<NeBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_info = self;
  }
  baseType->ne_(self_, other);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { self } );
    setattr(n, jit::stringToSymbol("other"), other);
  }
  return self;
}
Tensor & VariableType::s_ne_(Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("ne_");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  check_inplace(self);
  std::shared_ptr<NeBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<NeBackward1>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->other_info = other;
    grad_fn->self_info = self;
  }
  baseType->s_ne_(self_, other_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { self } );
    (void)n;
  }
  return self;
}
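// eq/ne (and ge above) still allocate EqBackward*/NeBackward* nodes whenever an
// input requires grad, even though comparison outputs are not usefully
// differentiable; presumably the generated backwards simply emit zero (or
// undefined) gradients for the recorded self_info/other_info inputs.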
std::tuple<Tensor,Tensor> VariableType::min(const Tensor & self, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("min");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MinBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MinBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->min(self_, dim, keepdim));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "min", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  if (grad_fn) {
    auto& min_indices = std::get<1>(ret);
    grad_fn->min_indices_ = SavedVariable(min_indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
Tensor VariableType::s_min(const Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("min");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<MinBackward2> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<MinBackward2>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->self_ = SavedVariable(self, false);
    grad_fn->other_ = SavedVariable(other, false);
  }
  auto ret = as_variable(baseType->s_min(self_, other_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "min", { self, other }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::min(const Tensor & self) const {
  profiler::RecordFunction profiler("min");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MinBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MinBackward1>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
  }
  auto ret = as_variable(baseType->min(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "min", { self }, { ret } );
    (void)n;
  }
  if (grad_fn) {
    auto& result = ret;
    grad_fn->result_ = SavedVariable(result, true);
  }
  return Tensor(std::move(ret));
}
std::tuple<Tensor,Tensor> VariableType::max(const Tensor & self, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("max");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MaxBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MaxBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->max(self_, dim, keepdim));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "max", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  if (grad_fn) {
    auto& max_indices = std::get<1>(ret);
    grad_fn->max_indices_ = SavedVariable(max_indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
Tensor VariableType::s_max(const Tensor & self, const Tensor & other) const {
  profiler::RecordFunction profiler("max");
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<MaxBackward2> grad_fn;
  auto requires_grad = compute_requires_grad({ self, other });
  if (requires_grad) {
    grad_fn = std::make_shared<MaxBackward2>();
    grad_fn->next_functions = compute_next_functions({ self, other });
    grad_fn->self_ = SavedVariable(self, false);
    grad_fn->other_ = SavedVariable(other, false);
  }
  auto ret = as_variable(baseType->s_max(self_, other_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self, other )) {
    jit::Node *n = jit::tracer::recordTrace( "max", { self, other }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::max(const Tensor & self) const {
  profiler::RecordFunction profiler("max");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MaxBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MaxBackward1>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
  }
  auto ret = as_variable(baseType->max(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "max", { self }, { ret } );
    (void)n;
  }
  if (grad_fn) {
    auto& result = ret;
    grad_fn->result_ = SavedVariable(result, true);
  }
  return Tensor(std::move(ret));
}
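// For the dim-wise min/max only the values tensor (std::get<0>(ret)) receives
// a history; the indices are stashed on the grad_fn afterwards. The second
// argument to SavedVariable appears to flag whether the saved tensor is an
// output of this node (true for min_indices_/max_indices_/result_, false for
// inputs such as self_ saved before the base call).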
std::tuple<Tensor,Tensor> VariableType::kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("kthvalue");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<KthvalueBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<KthvalueBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->kthvalue(self_, k, dim, keepdim));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "kthvalue", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("k"), k);
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  if (grad_fn) {
    auto& indices = std::get<1>(ret);
    grad_fn->indices_ = SavedVariable(indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
std::tuple<Tensor,Tensor> VariableType::mode(const Tensor & self, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("mode");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<ModeBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<ModeBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->mode(self_, dim, keepdim));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "mode", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  if (grad_fn) {
    auto& indices = std::get<1>(ret);
    grad_fn->indices_ = SavedVariable(indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
std::tuple<Tensor,Tensor> VariableType::median(const Tensor & self, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("median");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MedianBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MedianBackward1>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->median(self_, dim, keepdim));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "median", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  if (grad_fn) {
    auto& indices = std::get<1>(ret);
    grad_fn->indices_ = SavedVariable(indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
Tensor VariableType::median(const Tensor & self) const {
  profiler::RecordFunction profiler("median");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MedianBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MedianBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
  }
  auto ret = as_variable(baseType->median(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "median", { self }, { ret } );
    (void)n;
  }
  if (grad_fn) {
    auto& result = ret;
    grad_fn->result_ = SavedVariable(result, true);
  }
  return Tensor(std::move(ret));
}
std::tuple<Tensor,Tensor> VariableType::sort(const Tensor & self, int64_t dim, bool descending) const {
  profiler::RecordFunction profiler("sort");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<SortBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<SortBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
  }
  auto ret = as_variable(baseType->sort(self_, dim, descending));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "sort", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("descending"), descending);
  }
  if (grad_fn) {
    auto& indices = std::get<1>(ret);
    grad_fn->indices_ = SavedVariable(indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
std::tuple<Tensor,Tensor> VariableType::topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const {
  profiler::RecordFunction profiler("topk");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<TopkBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<TopkBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->dim = dim;
  }
  auto ret = as_variable(baseType->topk(self_, k, dim, largest, sorted));
  set_history(std::get<0>(ret), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "topk", { self }, { std::get<0>(ret), std::get<1>(ret) } );
    setattr(n, jit::stringToSymbol("k"), k);
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("largest"), largest);
    setattr(n, jit::stringToSymbol("sorted"), sorted);
  }
  if (grad_fn) {
    auto& indices = std::get<1>(ret);
    grad_fn->indices_ = SavedVariable(indices, true);
  }
  return std::tuple<Tensor,Tensor>(std::move(ret));
}
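// kthvalue, mode, median, sort and topk follow the same index-returning
// scheme: the backward only needs the original sizes, the reduced dim (plus
// keepdim where it exists) and the indices, so that is all that gets saved.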
bool VariableType::all(const Tensor & self) const {
  auto& self_ = unpack(self, "self", 0);
  return baseType->all(self_);
}
bool VariableType::any(const Tensor & self) const {
  auto& self_ = unpack(self, "self", 0);
  return baseType->any(self_);
}
int64_t VariableType::get_device(const Tensor & self) const {
  auto& self_ = unpack(self, "self", 0);
  return baseType->get_device(self_);
}
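// all, any and get_device return plain C++ values rather than Variables, so
// they skip the profiler, the autograd bookkeeping and the tracer entirely and
// just forward to the base type.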
Tensor VariableType::abs(const Tensor & self) const {
  profiler::RecordFunction profiler("abs");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<AbsBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<AbsBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
  }
  auto ret = as_variable(baseType->abs(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor & VariableType::abs_(Tensor & self) const {
  profiler::RecordFunction profiler("abs_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<AbsBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<AbsBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self.clone(), false);
  }
  baseType->abs_(self_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { self } );
    (void)n;
  }
  return self;
}
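// Note the asymmetry: abs saves self as-is, but abs_ must save self.clone(),
// because the in-place call overwrites self before the backward ever runs and
// AbsBackward needs the pre-mutation values. The same pattern recurs in the
// other in-place unary ops below.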
Tensor & VariableType::sigmoid_(Tensor & self) const {
  profiler::RecordFunction profiler("sigmoid_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<SigmoidBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<SigmoidBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  baseType->sigmoid_(self_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { self } );
    (void)n;
  }
  if (grad_fn) {
    grad_fn->result_ = SavedVariable(self, true);
  }
  return self;
}
Tensor VariableType::sigmoid(const Tensor & self) const {
  profiler::RecordFunction profiler("sigmoid");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<SigmoidBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<SigmoidBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  auto ret = as_variable(baseType->sigmoid(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { ret } );
    (void)n;
  }
  if (grad_fn) {
    auto& result = ret;
    grad_fn->result_ = SavedVariable(result, true);
  }
  return Tensor(std::move(ret));
}
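// sigmoid (like tanh, exp and rsqrt further down) saves the *result* rather
// than the input: its derivative only needs the output, d/dx sigmoid(x) =
// y * (1 - y) with y = sigmoid(x), so keeping y avoids holding on to x.
//
// Illustrative sketch only (the helper names here are assumptions, not part of
// this file): a Variable created with requires_grad=true that flows through
// VariableType::sigmoid ends up with SigmoidBackward as its grad_fn, e.g.
//
//   auto x = make_variable(at::CPU(at::kFloat).rand({3}), /*requires_grad=*/true);
//   auto y = x.sigmoid();   // dispatches to VariableType::sigmoid above
//   // y's grad_fn is now the SigmoidBackward node holding result_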
Tensor & VariableType::log_(Tensor & self) const { | |
profiler::RecordFunction profiler("log_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LogBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->log_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::log(const Tensor & self) const { | |
profiler::RecordFunction profiler("log"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LogBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->log(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::log1p_(Tensor & self) const { | |
profiler::RecordFunction profiler("log1p_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Log1PBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Log1PBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->log1p_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::log1p(const Tensor & self) const { | |
profiler::RecordFunction profiler("log1p"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Log1PBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Log1PBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->log1p(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::lgamma(const Tensor & self) const { | |
profiler::RecordFunction profiler("lgamma"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LgammaBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LgammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->lgamma(self_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::lgamma_(Tensor & self) const { | |
profiler::RecordFunction profiler("lgamma_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LgammaBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LgammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->lgamma_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::exp_(Tensor & self) const { | |
profiler::RecordFunction profiler("exp_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ExpBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ExpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->exp_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::exp(const Tensor & self) const { | |
profiler::RecordFunction profiler("exp"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ExpBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ExpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->exp(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { ret } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
auto& result = ret; | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::expm1_(Tensor & self) const { | |
profiler::RecordFunction profiler("expm1_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Expm1Backward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Expm1Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->expm1_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::expm1(const Tensor & self) const { | |
profiler::RecordFunction profiler("expm1"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Expm1Backward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Expm1Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->expm1(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::cos_(Tensor & self) const { | |
profiler::RecordFunction profiler("cos_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CosBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->cos_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::cos(const Tensor & self) const { | |
profiler::RecordFunction profiler("cos"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CosBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->cos(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::acos_(Tensor & self) const { | |
profiler::RecordFunction profiler("acos_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AcosBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AcosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->acos_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::acos(const Tensor & self) const { | |
profiler::RecordFunction profiler("acos"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AcosBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AcosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->acos(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::cosh_(Tensor & self) const { | |
profiler::RecordFunction profiler("cosh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CoshBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CoshBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->cosh_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::cosh(const Tensor & self) const { | |
profiler::RecordFunction profiler("cosh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CoshBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CoshBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->cosh(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::sin_(Tensor & self) const { | |
profiler::RecordFunction profiler("sin_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->sin_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sin(const Tensor & self) const { | |
profiler::RecordFunction profiler("sin"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->sin(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::asin_(Tensor & self) const { | |
profiler::RecordFunction profiler("asin_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AsinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AsinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->asin_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::asin(const Tensor & self) const { | |
profiler::RecordFunction profiler("asin"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AsinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AsinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->asin(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::sinh_(Tensor & self) const { | |
profiler::RecordFunction profiler("sinh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SinhBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SinhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->sinh_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sinh(const Tensor & self) const { | |
profiler::RecordFunction profiler("sinh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SinhBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SinhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->sinh(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::tan_(Tensor & self) const { | |
profiler::RecordFunction profiler("tan_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TanBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->tan_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::tan(const Tensor & self) const { | |
profiler::RecordFunction profiler("tan"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TanBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->tan(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::atan_(Tensor & self) const { | |
profiler::RecordFunction profiler("atan_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AtanBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AtanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->atan_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::atan(const Tensor & self) const { | |
profiler::RecordFunction profiler("atan"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AtanBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AtanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->atan(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::tanh_(Tensor & self) const {
  profiler::RecordFunction profiler("tanh_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<TanhBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<TanhBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  baseType->tanh_(self_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { self } );
    (void)n;
  }
  if (grad_fn) {
    grad_fn->result_ = SavedVariable(self, true);
  }
  return self;
}
Tensor VariableType::tanh(const Tensor & self) const {
  profiler::RecordFunction profiler("tanh");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<TanhBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<TanhBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  auto ret = as_variable(baseType->tanh(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { ret } );
    (void)n;
  }
  if (grad_fn) {
    auto& result = ret;
    grad_fn->result_ = SavedVariable(result, true);
  }
  return Tensor(std::move(ret));
}
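// tanh_/tanh mirror sigmoid_/sigmoid above: nothing is saved up front and the
// result is recorded after rebase_history/set_history, since d/dx tanh(x) =
// 1 - y^2 only needs the output y.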
Tensor & VariableType::erf_(Tensor & self) const { | |
profiler::RecordFunction profiler("erf_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ErfBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ErfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->erf_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::erf(const Tensor & self) const { | |
profiler::RecordFunction profiler("erf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ErfBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ErfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->erf(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::erfinv_(Tensor & self) const { | |
profiler::RecordFunction profiler("erfinv_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ErfinvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ErfinvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->erfinv_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::erfinv(const Tensor & self) const { | |
profiler::RecordFunction profiler("erfinv"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ErfinvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ErfinvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->erfinv(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::sqrt_(Tensor & self) const { | |
profiler::RecordFunction profiler("sqrt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqrtBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->sqrt_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("sqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqrtBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->sqrt(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::rsqrt_(Tensor & self) const { | |
profiler::RecordFunction profiler("rsqrt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RsqrtBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RsqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->rsqrt_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::rsqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("rsqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RsqrtBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RsqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->rsqrt(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { ret } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
auto& result = ret; | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::ceil_(Tensor & self) const { | |
profiler::RecordFunction profiler("ceil_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CeilBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CeilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->ceil_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::ceil(const Tensor & self) const { | |
profiler::RecordFunction profiler("ceil"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CeilBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CeilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->ceil(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::floor_(Tensor & self) const { | |
profiler::RecordFunction profiler("floor_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FloorBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FloorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->floor_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::floor(const Tensor & self) const { | |
profiler::RecordFunction profiler("floor"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<FloorBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FloorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->floor(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::round_(Tensor & self) const { | |
profiler::RecordFunction profiler("round_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RoundBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RoundBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->round_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "round", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::round(const Tensor & self) const { | |
profiler::RecordFunction profiler("round"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RoundBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RoundBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->round(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "round", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::trunc_(Tensor & self) const { | |
profiler::RecordFunction profiler("trunc_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TruncBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TruncBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->trunc_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::trunc(const Tensor & self) const { | |
profiler::RecordFunction profiler("trunc"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TruncBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TruncBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->trunc(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::frac_(Tensor & self) const {
  profiler::RecordFunction profiler("frac_");
  auto& self_ = unpack(self, "self", 0);
  check_inplace(self);
  std::shared_ptr<FracBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<FracBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  baseType->frac_(self_);
  increment_version(self);
  rebase_history(static_cast<Variable&>(self), grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { self } );
    (void)n;
  }
  return self;
}
Tensor VariableType::frac(const Tensor & self) const {
  profiler::RecordFunction profiler("frac");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<FracBackward> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<FracBackward>();
    grad_fn->next_functions = compute_next_functions({ self });
  }
  auto ret = as_variable(baseType->frac(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
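// The reductions below (mean, var, std, norm) record whatever their backwards
// need to map the incoming gradient back onto the input shape: the input
// sizes and the reduced dim/keepdim flags, plus the saved input (and, for
// std/norm, the saved output) where the derivative reuses them.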
Tensor VariableType::mean(const Tensor & self, int64_t dim, bool keepdim) const {
  profiler::RecordFunction profiler("mean");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MeanBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MeanBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->self_argsize_dim = self.size(dim);
    grad_fn->dim = dim;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->mean(self_, dim, keepdim));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { ret } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::mean(const Tensor & self) const {
  profiler::RecordFunction profiler("mean");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<MeanBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<MeanBackward1>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_sizes = self.sizes();
    grad_fn->self_ = SavedVariable(self, false);
  }
  auto ret = as_variable(baseType->mean(self_));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { ret } );
    (void)n;
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::var(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
  profiler::RecordFunction profiler("var");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<VarBackward1> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<VarBackward1>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
    grad_fn->dim = dim;
    grad_fn->unbiased = unbiased;
    grad_fn->keepdim = keepdim;
  }
  auto ret = as_variable(baseType->var(self_, dim, unbiased, keepdim));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "var", { self }, { ret } );
    setattr(n, jit::stringToSymbol("dim"), dim);
    setattr(n, jit::stringToSymbol("unbiased"), unbiased);
    setattr(n, jit::stringToSymbol("keepdim"), keepdim);
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::var(const Tensor & self, bool unbiased) const {
  profiler::RecordFunction profiler("var");
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<VarBackward0> grad_fn;
  auto requires_grad = compute_requires_grad({ self });
  if (requires_grad) {
    grad_fn = std::make_shared<VarBackward0>();
    grad_fn->next_functions = compute_next_functions({ self });
    grad_fn->self_ = SavedVariable(self, false);
    grad_fn->unbiased = unbiased;
  }
  auto ret = as_variable(baseType->var(self_, unbiased));
  set_history(ret, grad_fn);
  if (jit::tracer::isTracing( self )) {
    jit::Node *n = jit::tracer::recordTrace( "var", { self }, { ret } );
    setattr(n, jit::stringToSymbol("unbiased"), unbiased);
  }
  return Tensor(std::move(ret));
}
Tensor VariableType::std(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
profiler::RecordFunction profiler("std"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StdBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<StdBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->unbiased = unbiased; | |
grad_fn->keepdim = keepdim; | |
} | |
auto ret = as_variable(baseType->std(self_, dim, unbiased, keepdim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "std", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
auto& destination = ret; | |
grad_fn->destination_ = SavedVariable(destination, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::std(const Tensor & self, bool unbiased) const { | |
profiler::RecordFunction profiler("std"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StdBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<StdBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->unbiased = unbiased; | |
} | |
auto ret = as_variable(baseType->std(self_, unbiased)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "std", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
} | |
if (grad_fn) { | |
auto& result = ret; | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
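// Unlike the reductions above, std() and norm() also save their own output
// (SavedVariable(ret, true), where the `true` flag appears to mark the saved
// Variable as an output of this grad_fn) because the derivative reuses the
// forward result rather than recomputing it.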
Tensor VariableType::norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("norm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NormBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto ret = as_variable(baseType->norm(self_, p, dim, keepdim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
auto& destination = ret; | |
grad_fn->destination_ = SavedVariable(destination, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::norm(const Tensor & self, Scalar p) const { | |
profiler::RecordFunction profiler("norm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NormBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
} | |
auto ret = as_variable(baseType->norm(self_, p)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
} | |
if (grad_fn) { | |
auto& result = ret; | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
profiler::RecordFunction profiler("renorm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RenormBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RenormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->maxnorm = maxnorm; | |
} | |
auto ret = as_variable(baseType->renorm(self_, p, dim, maxnorm)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("maxnorm"), maxnorm); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
profiler::RecordFunction profiler("renorm_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RenormBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RenormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->maxnorm = maxnorm; | |
} | |
baseType->renorm_(self_, p, dim, maxnorm); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("maxnorm"), maxnorm); | |
} | |
return self; | |
} | |
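// In-place variants differ from their out-of-place counterparts in three ways:
// check_inplace() validates that self may be modified in place (e.g. it is not
// a leaf Variable that requires grad), a clone of self is saved when the
// backward needs the pre-mutation values, and increment_version() plus
// rebase_history() replace set_history() so existing references observe the
// version bump.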
Tensor VariableType::s_dist(const Tensor & self, const Tensor & other, Scalar p) const { | |
profiler::RecordFunction profiler("dist"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<DistBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DistBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
grad_fn->p = p; | |
} | |
auto ret = as_variable(baseType->s_dist(self_, other_, p)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "dist", { self, other }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
} | |
if (grad_fn) { | |
auto& result = ret; | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
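// The s_ prefix (s_dist, s_atan2, s_pow, ...) appears to denote the
// "same-shape" variants that the public broadcasting wrappers dispatch to once
// the operands have been expanded; the autograd bookkeeping is identical.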
Tensor VariableType::reciprocal(const Tensor & self) const { | |
profiler::RecordFunction profiler("reciprocal"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReciprocalBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReciprocalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->reciprocal(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::reciprocal_(Tensor & self) const { | |
profiler::RecordFunction profiler("reciprocal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ReciprocalBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReciprocalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->reciprocal_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::neg(const Tensor & self) const { | |
profiler::RecordFunction profiler("neg"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NegBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NegBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->neg(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::neg_(Tensor & self) const { | |
profiler::RecordFunction profiler("neg_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<NegBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NegBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->neg_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_atan2(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("atan2"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<Atan2Backward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Atan2Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto ret = as_variable(baseType->s_atan2(self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::s_atan2_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("atan2_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<Atan2Backward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Atan2Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_atan2_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::pow(const Tensor & self, Scalar exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PowBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PowBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->exponent = exponent; | |
} | |
auto ret = as_variable(baseType->pow(self_, exponent)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("exponent"), exponent); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_pow(const Tensor & self, const Tensor & exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& exponent_ = unpack(exponent, "exponent", 1); | |
std::shared_ptr<PowBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, exponent }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PowBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, exponent }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->exponent_ = SavedVariable(exponent, false); | |
} | |
auto ret = as_variable(baseType->s_pow(self_, exponent_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, exponent )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::pow_(Tensor & self, Scalar exponent) const { | |
profiler::RecordFunction profiler("pow_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<PowBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PowBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->exponent = exponent; | |
} | |
baseType->pow_(self_, exponent); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("exponent"), exponent); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_pow_(Tensor & self, const Tensor & exponent) const { | |
profiler::RecordFunction profiler("pow_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& exponent_ = unpack(exponent, "exponent", 1); | |
check_inplace(self); | |
std::shared_ptr<PowBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, exponent }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PowBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, exponent }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->exponent_ = SavedVariable(exponent, false); | |
} | |
baseType->s_pow_(self_, exponent_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, exponent )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_lerp(const Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& end_ = unpack(end, "end", 1); | |
std::shared_ptr<LerpBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, end }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LerpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, end }); | |
grad_fn->weight = weight; | |
} | |
auto ret = as_variable(baseType->s_lerp(self_, end_, weight)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, end )) { | |
jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { ret } ); | |
setattr(n, jit::stringToSymbol("weight"), weight); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::s_lerp_(Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& end_ = unpack(end, "end", 1); | |
check_inplace(self); | |
std::shared_ptr<LerpBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, end }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LerpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, end }); | |
grad_fn->weight = weight; | |
} | |
baseType->s_lerp_(self_, end_, weight); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, end )) { | |
jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { self } ); | |
setattr(n, jit::stringToSymbol("weight"), weight); | |
} | |
return self; | |
} | |
Tensor VariableType::linspace(Scalar start, Scalar end, int64_t steps) const { | |
return as_variable(baseType->linspace(start, end, steps)); | |
} | |
Tensor VariableType::logspace(Scalar start, Scalar end, int64_t steps) const { | |
return as_variable(baseType->logspace(start, end, steps)); | |
} | |
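// Factory functions such as linspace(), logspace() and eye() take no Variable
// inputs, so there is no history to track and no trace to record; the result
// is simply wrapped with as_variable().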
Tensor VariableType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("histc"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HistcBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HistcBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->histc(self_, bins, min, max)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "histc", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("bins"), bins); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::zero_(Tensor & self) const { | |
profiler::RecordFunction profiler("zero_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ZeroBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ZeroBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->zero_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "zero", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sum(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("sum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SumBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SumBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto ret = as_variable(baseType->sum(self_, dim, keepdim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::sum(const Tensor & self) const { | |
profiler::RecordFunction profiler("sum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SumBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SumBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_variable(baseType->sum(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::prod(const Tensor & self, int64_t dim, bool keepdim) const { | |
throw std::runtime_error("VariableType::prod NYI"); | |
} | |
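// The dim/keepdim overload of prod() is not implemented in this snapshot and
// throws at runtime ("NYI"); only the full reduction below is wired up.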
Tensor VariableType::prod(const Tensor & self) const { | |
profiler::RecordFunction profiler("prod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ProdBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ProdBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->prod(self_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cumsum(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumsum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CumsumBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CumsumBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->cumsum(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cumsum", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cumprod(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumprod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CumprodBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CumprodBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->cumprod(self_, dim)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cumprod", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::sign(const Tensor & self) const { | |
profiler::RecordFunction profiler("sign"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SignBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SignBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->sign(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::sign_(Tensor & self) const { | |
profiler::RecordFunction profiler("sign_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SignBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SignBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sign_(self_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::trace(const Tensor & self) const { | |
profiler::RecordFunction profiler("trace"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TraceBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TraceBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_variable(baseType->trace(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trace", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::add(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AddBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->add(self_, other, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_add(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<AddBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
auto ret = as_variable(baseType->s_add(self_, other_, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { ret } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::add(const Tensor & self, SparseTensor other, Scalar alpha) const { | |
throw std::runtime_error("VariableType::add NYI"); | |
} | |
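// The SparseTensor overloads of add() and add_() are likewise unimplemented
// here and throw "NYI" if reached.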
Tensor & VariableType::add_(Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AddBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->add_(self_, other, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_add_(Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<AddBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
baseType->s_add_(self_, other_, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::add_(Tensor & self, SparseTensor other, Scalar alpha) const { | |
throw std::runtime_error("VariableType::add_ NYI"); | |
} | |
Tensor VariableType::sub(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SubBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SubBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->sub(self_, other, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_sub(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<SubBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SubBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
auto ret = as_variable(baseType->s_sub(self_, other_, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { ret } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::sub_(Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SubBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SubBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sub_(self_, other, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_sub_(Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<SubBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SubBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
baseType->s_sub_(self_, other_, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::mul(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MulBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MulBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
auto ret = as_variable(baseType->mul(self_, other)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_mul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<MulBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MulBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto ret = as_variable(baseType->s_mul(self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::mul_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("mul_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<MulBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MulBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
baseType->mul_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_mul_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<MulBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MulBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_mul_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::div(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DivBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DivBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
auto ret = as_variable(baseType->div(self_, other)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_div(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<DivBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DivBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto ret = as_variable(baseType->s_div(self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::div_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("div_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<DivBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DivBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
baseType->div_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_div_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("div_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<DivBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DivBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_div_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::fmod(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<FmodBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FmodBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->fmod(self_, other)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_fmod(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<FmodBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FmodBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto ret = as_variable(baseType->s_fmod(self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::fmod_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("fmod_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FmodBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FmodBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->fmod_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_fmod_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("fmod_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<FmodBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FmodBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_fmod_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::remainder(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RemainderBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RemainderBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->remainder(self_, other)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_remainder(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_no_requires_grad(other, "other"); | |
std::shared_ptr<RemainderBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RemainderBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->s_remainder(self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
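// remainder() computes gradients only w.r.t. self: check_no_requires_grad()
// presumably raises an error when `other` requires grad, and
// compute_requires_grad()/compute_next_functions() are called with { self }
// alone, unlike fmod() above, whose backward also saves other_.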
Tensor & VariableType::remainder_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("remainder_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RemainderBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RemainderBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->remainder_(self_, other); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_remainder_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("remainder_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
check_no_requires_grad(other, "other"); | |
std::shared_ptr<RemainderBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RemainderBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->s_remainder_(self_, other_); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::clamp(const Tensor & self, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("clamp"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min = min; | |
grad_fn->max = max; | |
} | |
auto ret = as_variable(baseType->clamp(self_, min, max)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return Tensor(std::move(ret)); | |
} | |
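// ClampBackward keeps self together with min/max; the backward presumably
// passes the gradient through only where self lies inside [min, max] and
// zeroes it elsewhere.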
Tensor & VariableType::clamp_(Tensor & self, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->min = min; | |
grad_fn->max = max; | |
} | |
baseType->clamp_(self_, min, max); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return self; | |
} | |
Tensor VariableType::clamp_min(const Tensor & self, Scalar min) const { | |
profiler::RecordFunction profiler("clamp_min"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampMinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampMinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min = min; | |
} | |
auto ret = as_variable(baseType->clamp_min(self_, min)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::clamp_min_(Tensor & self, Scalar min) const { | |
profiler::RecordFunction profiler("clamp_min_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampMinBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampMinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->min = min; | |
} | |
baseType->clamp_min_(self_, min); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
} | |
return self; | |
} | |
Tensor VariableType::clamp_max(const Tensor & self, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_max"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampMaxBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampMaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->max = max; | |
} | |
auto ret = as_variable(baseType->clamp_max(self_, max)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::clamp_max_(Tensor & self, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_max_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampMaxBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ClampMaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->max = max; | |
} | |
baseType->clamp_max_(self_, max); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return self; | |
} | |
Tensor VariableType::dot(const Tensor & self, const Tensor & tensor) const { | |
profiler::RecordFunction profiler("dot"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor_ = unpack(tensor, "tensor", 1); | |
std::shared_ptr<DotBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, tensor }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DotBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor }); | |
grad_fn->tensor_ = SavedVariable(tensor, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->dot(self_, tensor_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, tensor )) { | |
jit::Node *n = jit::tracer::recordTrace( "dot", { self, tensor }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
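// DotBackward saves both operands: for ret = dot(self, tensor) the gradients
// are grad * tensor w.r.t. self and grad * self w.r.t. tensor, so neither
// input can be discarded.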
Tensor VariableType::tril(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("tril"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TrilBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TrilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto ret = as_variable(baseType->tril(self_, diagonal)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::tril_(Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("tril_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TrilBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TrilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
baseType->tril_(self_, diagonal); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return self; | |
} | |
Tensor VariableType::triu(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("triu"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TriuBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TriuBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto ret = as_variable(baseType->triu(self_, diagonal)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::triu_(Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("triu_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TriuBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TriuBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
baseType->triu_(self_, diagonal); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return self; | |
} | |
Tensor VariableType::cross(const Tensor & self, const Tensor & other, int64_t dim) const { | |
profiler::RecordFunction profiler("cross"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<CrossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CrossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto ret = as_variable(baseType->cross(self_, other_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "cross", { self, other }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::eye(int64_t n, int64_t m) const { | |
return as_variable(baseType->eye(n, m)); | |
} | |
Tensor VariableType::diag(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("diag"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DiagBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DiagBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto ret = as_variable(baseType->diag(self_, diagonal)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "diag", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat1_ = unpack(mat1, "mat1", 1); | |
auto& mat2_ = unpack(mat2, "mat2", 2); | |
std::shared_ptr<AddmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat1, mat2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 }); | |
grad_fn->mat1_sizes = mat1.sizes(); | |
grad_fn->mat1_ = SavedVariable(mat1, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->beta = beta; | |
} | |
auto ret = as_variable(baseType->s_addmm(self_, mat1_, mat2_, beta, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, mat1, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat1_ = unpack(mat1, "mat1", 1); | |
auto& mat2_ = unpack(mat2, "mat2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat1, mat2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 }); | |
grad_fn->mat1_sizes = mat1.sizes(); | |
grad_fn->mat1_ = SavedVariable(mat1, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->beta = beta; | |
} | |
baseType->addmm_(self_, mat1_, mat2_, beta, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, mat1, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
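// Illustrative sketch (not the generated derivative code itself): addmm
// computes beta * self + alpha * (mat1 @ mat2), so AddmmBackward can use the
// state saved above roughly as
//   grad_self ~ beta  * grad
//   grad_mat1 ~ alpha * grad.mm(mat2.t())   // uses the saved mat2_
//   grad_mat2 ~ alpha * mat1.t().mm(grad)   // uses the saved mat1_
// The saved mat1_sizes / mat2_sizes give the backward the original matrix
// shapes; the exact formulas live in the generated Functions.cpp.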
Tensor VariableType::s_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat_ = unpack(mat, "mat", 1); | |
auto& vec_ = unpack(vec, "vec", 2); | |
std::shared_ptr<AddmvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat, vec }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddmvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->alpha = alpha; | |
grad_fn->beta = beta; | |
grad_fn->mat_ = SavedVariable(mat, false); | |
} | |
auto ret = as_variable(baseType->s_addmv(self_, mat_, vec_, beta, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, mat, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmv_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat_ = unpack(mat, "mat", 1); | |
auto& vec_ = unpack(vec, "vec", 2); | |
check_inplace(self); | |
std::shared_ptr<AddmvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat, vec }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddmvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->alpha = alpha; | |
grad_fn->beta = beta; | |
grad_fn->mat_ = SavedVariable(mat, false); | |
} | |
baseType->addmv_(self_, mat_, vec_, beta, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, mat, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec1_ = unpack(vec1, "vec1", 1); | |
auto& vec2_ = unpack(vec2, "vec2", 2); | |
std::shared_ptr<AddrBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, vec1, vec2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 }); | |
grad_fn->beta = beta; | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->vec1_ = SavedVariable(vec1, false); | |
} | |
auto ret = as_variable(baseType->s_addr(self_, vec1_, vec2_, beta, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, vec1, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addr_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec1_ = unpack(vec1, "vec1", 1); | |
auto& vec2_ = unpack(vec2, "vec2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddrBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, vec1, vec2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 }); | |
grad_fn->beta = beta; | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->vec1_ = SavedVariable(vec1, false); | |
} | |
baseType->addr_(self_, vec1_, vec2_, beta, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, vec1, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::ger(const Tensor & self, const Tensor & vec2) const { | |
profiler::RecordFunction profiler("ger"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec2_ = unpack(vec2, "vec2", 1); | |
std::shared_ptr<GerBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, vec2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GerBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec2 }); | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->ger(self_, vec2_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "ger", { self, vec2 }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::mv(const Tensor & self, const Tensor & vec) const { | |
profiler::RecordFunction profiler("mv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec_ = unpack(vec, "vec", 1); | |
std::shared_ptr<MvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, vec }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->mv(self_, vec_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "mv", { self, vec }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::mm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("mm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat2_ = unpack(mat2, "mat2", 1); | |
std::shared_ptr<MmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_variable(baseType->mm(self_, mat2_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "mm", { self, mat2 }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
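// Sketch of MmBackward with the state saved above (the actual code is
// generated into Functions.cpp):
//   grad_self ~ grad.mm(mat2.t())   // needs the saved mat2_
//   grad_mat2 ~ self.t().mm(grad)   // needs the saved self_
// self_sizes and mat2_sizes are kept alongside as shape information for the
// backward.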
Tensor VariableType::bmm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("bmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat2_ = unpack(mat2, "mat2", 1); | |
std::shared_ptr<BmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, mat2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
} | |
auto ret = as_variable(baseType->bmm(self_, mat2_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "bmm", { self, mat2 }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::s_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
std::shared_ptr<AddbmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, batch1, batch2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch1_argsize_0 = batch1.size(0); | |
grad_fn->batch1_argsize_1 = batch1.size(1); | |
grad_fn->batch2_argsize_2 = batch2.size(2); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
auto ret = as_variable(baseType->s_addbmm(self_, batch1_, batch2_, beta, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addbmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddbmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, batch1, batch2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch1_argsize_0 = batch1.size(0); | |
grad_fn->batch1_argsize_1 = batch1.size(1); | |
grad_fn->batch2_argsize_2 = batch2.size(2); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
baseType->addbmm_(self_, batch1_, batch2_, beta, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
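// Hedged sketch: addbmm reduces over the batch dimension,
//   out = beta * self + alpha * sum_b batch1[b] @ batch2[b],
// so the backward presumably expands the 2-D incoming gradient back into a
// batch using the saved batch1_argsize_0 / batch1_argsize_1 / batch2_argsize_2
// before applying the usual matrix-product gradient formulas.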
Tensor VariableType::s_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
std::shared_ptr<BaddbmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, batch1, batch2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BaddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
auto ret = as_variable(baseType->s_baddbmm(self_, batch1_, batch2_, beta, alpha)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
check_inplace(self); | |
std::shared_ptr<BaddbmmBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, batch1, batch2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BaddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
baseType->baddbmm_(self_, batch1_, batch2_, beta, alpha); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcmul"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
std::shared_ptr<AddcmulBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddcmulBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
auto ret = as_variable(baseType->s_addcmul(self_, tensor1_, tensor2_, value)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::s_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcmul_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddcmulBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddcmulBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
baseType->s_addcmul_(self_, tensor1_, tensor2_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcdiv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
std::shared_ptr<AddcdivBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddcdivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
auto ret = as_variable(baseType->s_addcdiv(self_, tensor1_, tensor2_, value)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::s_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcdiv_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddcdivBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AddcdivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
baseType->s_addcdiv_(self_, tensor1_, tensor2_, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
std::tuple<Tensor,Tensor> VariableType::gesv(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gesv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<GesvBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, A }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GesvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
grad_fn->A_ = SavedVariable(A, false); | |
} | |
auto ret = as_variable(baseType->gesv(self_, A_)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "gesv", { self, A }, { std::get<0>(ret), std::get<1>(ret) } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
auto& solution = std::get<0>(ret); | |
grad_fn->solution_ = SavedVariable(solution, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
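// Note the ordering in gesv above: inputs are captured as SavedVariable(x, false)
// *before* the base call, while outputs (here the solution) are captured as
// SavedVariable(x, true) only *after* set_history has attached grad_fn to the
// result; the boolean flags the saved tensor as an output of this function.
// The same pattern recurs below for trtrs, svd, inverse, potrf and
// _standard_gamma.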
std::tuple<Tensor,Tensor> VariableType::gels(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gels"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<GelsBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, A }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GelsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
} | |
auto ret = as_variable(baseType->gels(self_, A_)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "gels", { self, A }, { std::get<0>(ret), std::get<1>(ret) } ); | |
(void)n; | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
profiler::RecordFunction profiler("trtrs"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<TrtrsBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, A }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TrtrsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->A_ = SavedVariable(A, false); | |
grad_fn->upper = upper; | |
grad_fn->transpose = transpose; | |
grad_fn->unitriangular = unitriangular; | |
} | |
auto ret = as_variable(baseType->trtrs(self_, A_, upper, transpose, unitriangular)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "trtrs", { self, A }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("transpose"), transpose); | |
setattr(n, jit::stringToSymbol("unitriangular"), unitriangular); | |
} | |
if (grad_fn) { | |
auto& res1 = std::get<0>(ret); | |
grad_fn->res1_ = SavedVariable(res1, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::symeig(const Tensor & self, bool eigenvectors, bool upper) const { | |
profiler::RecordFunction profiler("symeig"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SymeigBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SymeigBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->symeig(self_, eigenvectors, upper)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "symeig", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::eig(const Tensor & self, bool eigenvectors) const { | |
profiler::RecordFunction profiler("eig"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<EigBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<EigBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->eig(self_, eigenvectors)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "eig", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::svd(const Tensor & self, bool some) const { | |
profiler::RecordFunction profiler("svd"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SvdBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SvdBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->some = some; | |
} | |
auto ret = as_variable(baseType->svd(self_, some)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "svd", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("some"), some); | |
} | |
if (grad_fn) { | |
auto& res1 = std::get<0>(ret); | |
grad_fn->res1_ = SavedVariable(res1, true); | |
auto& res2 = std::get<1>(ret); | |
grad_fn->res2_ = SavedVariable(res2, true); | |
auto& res3 = std::get<2>(ret); | |
grad_fn->res3_ = SavedVariable(res3, true); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::inverse(const Tensor & self) const { | |
profiler::RecordFunction profiler("inverse"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<InverseBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<InverseBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->inverse(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "inverse", { self }, { ret } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::potrf(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PotrfBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PotrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->upper = upper; | |
} | |
auto ret = as_variable(baseType->potrf(self_, upper)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "potrf", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::potrs(const Tensor & self, const Tensor & input2, bool upper) const { | |
profiler::RecordFunction profiler("potrs"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
std::shared_ptr<PotrsBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, input2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PotrsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
} | |
auto ret = as_variable(baseType->potrs(self_, input2_, upper)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, input2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "potrs", { self, input2 }, { ret } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::potri(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potri"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PotriBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PotriBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->potri(self_, upper)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "potri", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::pstrf(const Tensor & self, bool upper, Scalar tol) const { | |
profiler::RecordFunction profiler("pstrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PstrfBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PstrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->pstrf(self_, upper, tol)); | |
set_history({ std::get<0>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pstrf", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("tol"), tol); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::qr(const Tensor & self) const { | |
profiler::RecordFunction profiler("qr"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<QrBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<QrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->qr(self_)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "qr", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
(void)n; | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::geqrf(const Tensor & self) const { | |
profiler::RecordFunction profiler("geqrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GeqrfBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeqrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->geqrf(self_)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "geqrf", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
(void)n; | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::orgqr(const Tensor & self, const Tensor & input2) const { | |
profiler::RecordFunction profiler("orgqr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
std::shared_ptr<OrgqrBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, input2 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<OrgqrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
} | |
auto ret = as_variable(baseType->orgqr(self_, input2_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, input2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "orgqr", { self, input2 }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { | |
profiler::RecordFunction profiler("ormqr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
auto& input3_ = unpack(input3, "input3", 2); | |
std::shared_ptr<OrmqrBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, input2, input3 }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<OrmqrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2, input3 }); | |
} | |
auto ret = as_variable(baseType->ormqr(self_, input2_, input3_, left, transpose)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, input2, input3 )) { | |
jit::Node *n = jit::tracer::recordTrace( "ormqr", { self, input2, input3 }, { ret } ); | |
setattr(n, jit::stringToSymbol("left"), left); | |
setattr(n, jit::stringToSymbol("transpose"), transpose); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::btrifact(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BtrifactBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BtrifactBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->btrifact(self_, pivot)); | |
set_history({ std::get<0>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrifact", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("pivot"), pivot); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::btrifact_with_info(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact_with_info"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BtrifactWithInfoBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BtrifactWithInfoBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->btrifact_with_info(self_, pivot)); | |
set_history({ std::get<0>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrifact_with_info", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("pivot"), pivot); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) const { | |
profiler::RecordFunction profiler("btrisolve"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& LU_data_ = unpack(LU_data, "LU_data", 1); | |
auto& LU_pivots_ = unpack(LU_pivots, "LU_pivots", 2); | |
check_no_requires_grad(LU_data, "LU_data"); | |
check_no_requires_grad(LU_pivots, "LU_pivots"); | |
std::shared_ptr<BtrisolveBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BtrisolveBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->btrisolve(self_, LU_data_, LU_pivots_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, LU_data, LU_pivots )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrisolve", { self, LU_data, LU_pivots }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::randperm(int64_t n, Generator * generator) const { | |
return as_variable(baseType->randperm(n, generator)); | |
} | |
Tensor & VariableType::random_(Tensor & self, int64_t from, int64_t to, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RandomBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, from, to, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor & VariableType::random_(Tensor & self, int64_t to, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RandomBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, to, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor & VariableType::random_(Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward2> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RandomBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
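// The random sampling ops in this stretch (random_, multinomial, uniform_,
// normal, normal_, cauchy_, log_normal_, exponential_, geometric_, bernoulli,
// _standard_gamma) still build *Backward nodes and wire up history, but none
// of them record a JIT trace node -- there is no jit::tracer::isTracing block,
// unlike the deterministic ops above.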
Tensor VariableType::multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const { | |
profiler::RecordFunction profiler("multinomial"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MultinomialBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MultinomialBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->multinomial(self_, num_samples, replacement, generator)); | |
set_history(ret, grad_fn); | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::uniform_(Tensor & self, double from, double to, Generator * generator) const { | |
profiler::RecordFunction profiler("uniform_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<UniformBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UniformBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->uniform_(self_, from, to, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor VariableType::normal(const Tensor & mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
std::shared_ptr<NormalBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ mean }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormalBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ mean }); | |
grad_fn->mean_sizes = mean.sizes(); | |
} | |
auto ret = as_variable(baseType->normal(mean_, std, generator)); | |
set_history(ret, grad_fn); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::normal(double mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& std_ = unpack(std, "std", 1); | |
std::shared_ptr<NormalBackward2> grad_fn; | |
auto requires_grad = compute_requires_grad({ std }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormalBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ std }); | |
grad_fn->std_sizes = std.sizes(); | |
} | |
auto ret = as_variable(baseType->normal(mean, std_, generator)); | |
set_history(ret, grad_fn); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
auto& std_ = unpack(std, "std", 1); | |
std::shared_ptr<NormalBackward3> grad_fn; | |
auto requires_grad = compute_requires_grad({ mean, std }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormalBackward3>(); | |
grad_fn->next_functions = compute_next_functions({ mean, std }); | |
grad_fn->mean_sizes = mean.sizes(); | |
grad_fn->std_sizes = std.sizes(); | |
} | |
auto ret = as_variable(baseType->normal(mean_, std_, generator)); | |
set_history(ret, grad_fn); | |
return Tensor(std::move(ret)); | |
} | |
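// The only state NormalBackward{1,2,3} keep is mean_sizes / std_sizes; a
// reasonable reading is that the backward only needs to produce gradients of
// the right shape for the mean / std inputs, with the exact formulas living in
// the generated Functions.cpp.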
Tensor & VariableType::normal_(Tensor & self, double mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<NormalBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NormalBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->normal_(self_, mean, std, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor & VariableType::cauchy_(Tensor & self, double median, double sigma, Generator * generator) const { | |
profiler::RecordFunction profiler("cauchy_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CauchyBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CauchyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->cauchy_(self_, median, sigma, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor & VariableType::log_normal_(Tensor & self, double mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("log_normal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LogNormalBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogNormalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->log_normal_(self_, mean, std, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor & VariableType::exponential_(Tensor & self, double lambd, Generator * generator) const { | |
profiler::RecordFunction profiler("exponential_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ExponentialBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ExponentialBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->exponential_(self_, lambd, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor VariableType::rand(IntList size, Generator * generator) const { | |
return as_variable(baseType->rand(size, generator)); | |
} | |
Tensor VariableType::randn(IntList size, Generator * generator) const { | |
return as_variable(baseType->randn(size, generator)); | |
} | |
Tensor & VariableType::geometric_(Tensor & self, double p, Generator * generator) const { | |
profiler::RecordFunction profiler("geometric_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GeometricBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GeometricBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->geometric_(self_, p, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
return self; | |
} | |
Tensor VariableType::bernoulli(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BernoulliBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BernoulliBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->bernoulli(self_, generator)); | |
set_history(ret, grad_fn); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("_standard_gamma"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StandardGammaBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<StandardGammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->_standard_gamma(self_, generator)); | |
set_history(ret, grad_fn); | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::tensor(Storage & storage, int64_t storageOffset, IntList size, IntList stride) const { | |
return as_variable(baseType->tensor(storage, storageOffset, size, stride)); | |
} | |
Tensor VariableType::tensor(IntList size) const { | |
return as_variable(baseType->tensor(size)); | |
} | |
Tensor VariableType::tensor(IntList size, IntList stride) const { | |
return as_variable(baseType->tensor(size, stride)); | |
} | |
Tensor VariableType::tensor() const { | |
return as_variable(baseType->tensor()); | |
} | |
Tensor VariableType::sparse_coo_tensor(const Tensor & indices, const Tensor & values) const { | |
throw std::runtime_error("VariableType::sparse_coo_tensor NYI"); | |
} | |
Tensor VariableType::alias(const Tensor & self) const { | |
profiler::RecordFunction profiler("alias"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AliasBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AliasBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->alias(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "alias", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
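// alias (and as_strided below) wrap the result with as_view rather than
// as_variable: the returned Variable is treated as a view of the input, so
// later in-place modifications can be detected by the saved-variable version
// checks on either tensor.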
Tensor & VariableType::assign_(Tensor & self, const Tensor & src) const { | |
throw std::runtime_error("VariableType::assign_ NYI"); | |
} | |
Tensor & VariableType::_copy_ignoring_overlaps_(Tensor & self, const Tensor & src) const { | |
throw std::runtime_error("VariableType::_copy_ignoring_overlaps_ NYI"); | |
} | |
Tensor VariableType::as_strided(const Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
profiler::RecordFunction profiler("as_strided"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AsStridedBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AsStridedBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_geometry = TensorGeometry(self); | |
grad_fn->size = size; | |
grad_fn->stride = stride; | |
grad_fn->storage_offset = storage_offset; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->as_strided(self_, size, stride, storage_offset)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("storage_offset"), storage_offset); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::as_strided_(Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
profiler::RecordFunction profiler("as_strided_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AsStridedBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AsStridedBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_geometry = TensorGeometry(self); | |
grad_fn->size = size; | |
grad_fn->stride = stride; | |
grad_fn->storage_offset = storage_offset; | |
} | |
baseType->as_strided_(self_, size, stride, storage_offset); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("storage_offset"), storage_offset); | |
} | |
return self; | |
} | |
Tensor VariableType::cat(TensorList tensors, int64_t dim) const { | |
profiler::RecordFunction profiler("cat"); | |
auto tensors_ = unpack(tensors, "tensors", 0); | |
std::shared_ptr<CatBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ tensors }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CatBackward>(); | |
grad_fn->next_functions = compute_next_functions({ tensors }); | |
grad_fn->tensors_sizes_dim = to_arg_sizes(tensors, dim); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->cat(tensors_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( tensors )) { | |
jit::Node *n = jit::tracer::recordTrace( "cat", flatten( tensors ), { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
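// Hedged sketch of CatBackward: tensors_sizes_dim records per-input size
// information along dim, so the backward can slice the incoming gradient back
// apart, roughly
//   int64_t offset = 0;
//   for (auto size_along_dim : tensors_sizes_dim) {
//     grad_inputs.push_back(grad.narrow(dim, offset, size_along_dim));
//     offset += size_along_dim;
//   }
// (names here are illustrative; the real code is generated into Functions.cpp).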
Tensor & VariableType::reshape_(Tensor & self, IntList size, IntList stride) const { | |
throw std::runtime_error("VariableType::reshape_ NYI"); | |
} | |
Tensor VariableType::_sparse_mask(const Tensor & self, SparseTensor mask) const { | |
profiler::RecordFunction profiler("_sparse_mask"); | |
auto& self_ = unpack(self, "self", 0); | |
auto mask_ = unpack(mask, "mask", 1); | |
std::shared_ptr<SparseMaskBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SparseMaskBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->_sparse_mask(self_, mask_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sparse_mask", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("mask"), mask); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_indices(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->_indices(self_)); | |
} | |
Tensor VariableType::_values(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return as_variable(baseType->_values(self_)); | |
} | |
Tensor VariableType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
check_no_requires_grad(target, "target"); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<BinaryCrossEntropyBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<BinaryCrossEntropyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->binary_cross_entropy_forward(self_, target_, weight_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy", { self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, target, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for binary_cross_entropy_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, target, weight }); | |
} | |
auto ret = as_variable(baseType->binary_cross_entropy_backward(grad_output_, self_, target_, weight_, size_average, reduce)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_backward", { grad_output, self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
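// binary_cross_entropy dispatches to the baseType's *_forward kernel, and its
// companion backward wrapper above installs a plain Error node as grad_fn,
// i.e. double backward through binary_cross_entropy is deliberately
// unimplemented and raises at runtime.  The losses that follow (kl_div,
// l1_loss, and mse_loss below) instead build real *BackwardBackward nodes for
// their backward wrappers, so a second derivative is supported there.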
Tensor VariableType::kl_div(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<KlDivBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<KlDivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->kl_div_forward(self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "kl_div", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<KlDivBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<KlDivBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->kl_div_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "kl_div_backward", { grad_output, self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<L1LossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<L1LossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->l1_loss_forward(self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "l1_loss", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<L1LossBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<L1LossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->l1_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "l1_loss_backward", { grad_output, self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::mse_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<MseLossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MseLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->mse_loss_forward(self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "mse_loss", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<MseLossBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MseLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->mse_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "mse_loss_backward", { grad_output, self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
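// Classification losses take an integer class `target`, unpacked with
// unpack_long rather than unpack, and an optional `weight` unpacked with
// unpack_opt (an undefined weight is allowed). The scalar hyper-parameters
// (p, margin) are stored on the backward node and recorded as attributes on
// the traced node alongside size_average.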
Tensor VariableType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 4); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<MultiMarginLossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MultiMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->p = p; | |
grad_fn->margin = margin; | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
} | |
auto ret = as_variable(baseType->multi_margin_loss_forward(self_, target_, p, margin, weight_, size_average)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss", { self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("margin"), margin); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
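// Some backward kernels have no implemented double backward. Their wrappers
// (binary_cross_entropy_backward above, multi_margin_loss_backward and
// multilabel_margin_loss_backward here) install an Error node instead of a
// real grad_fn, so differentiating through their result throws
// "the derivative for ... is not implemented" rather than silently producing
// wrong gradients.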
Tensor VariableType::multi_margin_loss_backward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 4); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, target, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for multi_margin_loss_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, target, weight }); | |
} | |
auto ret = as_variable(baseType->multi_margin_loss_backward(self_, target_, p, margin, weight_, size_average)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_backward", { self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("margin"), margin); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
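// multilabel_margin_loss allocates a scratch `is_target` Variable with
// tensor(), hands its underlying data tensor to the forward kernel to fill in,
// and saves the Variable on the backward node. The same buffer pattern
// reappears below with `total_weight` in nll_loss / nll_loss2d and `buffer`
// in log_sigmoid.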
Tensor VariableType::multilabel_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto is_target = tensor(); | |
auto& is_target_ = static_cast<VariableImpl*>(is_target.get())->data; | |
std::shared_ptr<MultilabelMarginLossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MultilabelMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->is_target_ = SavedVariable(is_target, false); | |
} | |
auto ret = as_variable(baseType->multilabel_margin_loss_forward(self_, target_, size_average, is_target_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::multilabel_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average, const Tensor & is_target) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto& is_target_ = unpack(is_target, "is_target", 3); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, target, is_target }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for multilabel_margin_loss_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, target, is_target }); | |
} | |
auto ret = as_variable(baseType->multilabel_margin_loss_backward(self_, target_, size_average, is_target_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, target, is_target )) { | |
jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_backward", { self, target, is_target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
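// nll_loss and nll_loss2d follow the classification pattern with an optional,
// non-differentiable `weight`, a `total_weight` scratch buffer, and the
// ignore_index / reduce flags stored on the backward node as well as recorded
// as trace attributes. Note that NllLossBackwardBackward only needs target and
// weight saved, not self.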
Tensor VariableType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
auto total_weight = tensor(); | |
auto& total_weight_ = static_cast<VariableImpl*>(total_weight.get())->data; | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<NllLossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NllLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
grad_fn->total_weight_ = SavedVariable(total_weight, false); | |
} | |
auto ret = as_variable(baseType->nll_loss_forward(self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss", { self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack_long(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
auto& total_weight_ = unpack(total_weight, "total_weight", 7); | |
check_no_requires_grad(weight, "weight"); | |
check_no_requires_grad(total_weight, "total_weight"); | |
std::shared_ptr<NllLossBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NllLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->nll_loss_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss_backward", { grad_output, self, target, weight, total_weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
auto total_weight = tensor(); | |
auto& total_weight_ = static_cast<VariableImpl*>(total_weight.get())->data; | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<NllLoss2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NllLoss2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
grad_fn->total_weight_ = SavedVariable(total_weight, false); | |
} | |
auto ret = as_variable(baseType->nll_loss2d_forward(self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss2d", { self, target, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack_long(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
auto& total_weight_ = unpack(total_weight, "total_weight", 7); | |
check_no_requires_grad(weight, "weight"); | |
check_no_requires_grad(total_weight, "total_weight"); | |
std::shared_ptr<NllLoss2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NllLoss2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->nll_loss2d_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_backward", { grad_output, self, target, weight, total_weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::smooth_l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SmoothL1LossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SmoothL1LossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->smooth_l1_loss_forward(self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SmoothL1LossBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SmoothL1LossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto ret = as_variable(baseType->smooth_l1_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_backward", { grad_output, self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::soft_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SoftMarginLossBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
} | |
auto ret = as_variable(baseType->soft_margin_loss_forward(self_, target_, size_average)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::soft_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SoftMarginLossBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftMarginLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
} | |
auto ret = as_variable(baseType->soft_margin_loss_backward(self_, target_, size_average)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_backward", { self, target }, { ret } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return Tensor(std::move(ret)); | |
} | |
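// Several activations in this section (elu, hardtanh, leaky_relu, log_softmax,
// softmax, softplus, rrelu_with_noise) save their own *output* for the
// backward pass. The `grad_fn->output_ = SavedVariable(output, true)`
// assignment is emitted after set_history and the tracing block, once the
// returned Variable already carries its freshly attached history.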
Tensor VariableType::elu(const Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<EluBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<EluBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
} | |
auto ret = as_variable(baseType->elu_forward(self_, alpha, scale)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, const Tensor & output) const { | |
profiler::RecordFunction profiler("elu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<EluBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<EluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->elu_backward(grad_output_, alpha, scale, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu_backward", { grad_output, output }, { ret } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
return Tensor(std::move(ret)); | |
} | |
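// The in-place variants (elu_, hardtanh_, leaky_relu_, rrelu_with_noise_,
// threshold_) follow the same recipe with dedicated *Backward1 nodes, but
// mutate `self` instead of allocating a result: check_inplace verifies the
// Variable may be modified in place, the base kernel writes into self_,
// increment_version bumps the version counter so stale SavedVariables can be
// detected, and rebase_history splices the new grad_fn into self's existing
// history. A compressed sketch, again with a hypothetical op `foo_` and the
// tracing / output-saving steps omitted:
//
//   Tensor & VariableType::foo_(Tensor & self, Scalar alpha) const {
//     auto& self_ = unpack(self, "self", 0);
//     check_inplace(self);
//     std::shared_ptr<FooBackward> grad_fn;              // hypothetical backward node
//     auto requires_grad = compute_requires_grad({ self });
//     if (requires_grad) {
//       grad_fn = std::make_shared<FooBackward>();
//       grad_fn->next_functions = compute_next_functions({ self });
//       grad_fn->alpha = alpha;
//     }
//     baseType->foo_forward_(self_, alpha);              // mutates self_ in place
//     increment_version(self);
//     rebase_history(static_cast<Variable&>(self), grad_fn);
//     return self;
//   }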
Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<EluBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<EluBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
} | |
baseType->elu_forward_(self_, alpha, scale); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::glu(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GluBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->glu_forward(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "glu", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<GluBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<GluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->glu_backward(grad_output_, self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "glu_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::hardshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HardshrinkBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HardshrinkBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto ret = as_variable(baseType->hardshrink_forward(self_, lambd)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardshrink", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::hardshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<HardshrinkBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HardshrinkBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto ret = as_variable(baseType->hardshrink_backward(grad_output_, self_, lambd)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardshrink_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HardtanhBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HardtanhBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
auto ret = as_variable(baseType->hardtanh_forward(self_, min_val, max_val)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<HardtanhBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HardtanhBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
auto ret = as_variable(baseType->hardtanh_backward(grad_output_, self_, min_val, max_val)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<HardtanhBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<HardtanhBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
baseType->hardtanh_forward_(self_, min_val, max_val); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::leaky_relu(const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LeakyReluBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeakyReluBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->negative_slope = negative_slope; | |
} | |
auto ret = as_variable(baseType->leaky_relu_forward(self_, negative_slope)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<LeakyReluBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeakyReluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->negative_slope = negative_slope; | |
} | |
auto ret = as_variable(baseType->leaky_relu_backward(grad_output_, self_, negative_slope)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::leaky_relu_(Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LeakyReluBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LeakyReluBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->negative_slope = negative_slope; | |
} | |
baseType->leaky_relu_forward_(self_, negative_slope); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
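// log_sigmoid uses the scratch-buffer pattern described above for its
// `buffer` argument. Ops like this one that take no scalar arguments still
// record a trace node but have nothing to attach to it, so the generated code
// simply discards the handle with `(void)n;`.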
Tensor VariableType::log_sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid"); | |
auto& self_ = unpack(self, "self", 0); | |
auto buffer = tensor(); | |
auto& buffer_ = static_cast<VariableImpl*>(buffer.get())->data; | |
std::shared_ptr<LogSigmoidBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogSigmoidBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->buffer_ = SavedVariable(buffer, false); | |
} | |
auto ret = as_variable(baseType->log_sigmoid_forward(self_, buffer_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_sigmoid", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const { | |
profiler::RecordFunction profiler("log_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& buffer_ = unpack(buffer, "buffer", 2); | |
check_no_requires_grad(buffer, "buffer"); | |
std::shared_ptr<LogSigmoidBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogSigmoidBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->buffer_ = SavedVariable(buffer, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->log_sigmoid_backward(grad_output_, self_, buffer_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, buffer )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_backward", { grad_output, self, buffer }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::log_softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("log_softmax"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LogSoftmaxBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogSoftmaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->log_softmax_forward(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_softmax", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::log_softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("log_softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<LogSoftmaxBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<LogSoftmaxBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->dim = dim; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->log_softmax_backward(grad_output_, self_, dim, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_softmax_backward", { grad_output, self, output }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::prelu(const Tensor & self, const Tensor & weight) const { | |
profiler::RecordFunction profiler("prelu"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
std::shared_ptr<PreluBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PreluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
} | |
auto ret = as_variable(baseType->prelu_forward(self_, weight_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "prelu", { self, weight }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
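// prelu_backward returns a (grad_input, grad_weight) pair selected by
// output_mask. Both tuple elements are attached to the same
// PreluBackwardBackward node via set_history, and output_mask itself is
// recorded as an attribute on the traced node.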
std::tuple<Tensor,Tensor> VariableType::prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("prelu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<PreluBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PreluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
} | |
auto ret = as_variable(baseType->prelu_backward(grad_output_, self_, weight_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "prelu_backward", { grad_output, self, weight }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
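// rrelu_with_noise takes a Generator* and is randomized during training.
// Unlike the other activations in this section, its generated forward contains
// no recordTrace block, presumably because the result depends on the random
// number generator; the deterministic rrelu_with_noise_backward is traced as
// usual. The noise tensor is checked with check_no_requires_grad and saved on
// the backward node.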
Tensor VariableType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& noise_ = unpack(noise, "noise", 1); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RreluWithNoiseBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
auto ret = as_variable(baseType->rrelu_with_noise_forward(self_, noise_, lower, upper, training, generator)); | |
set_history(ret, grad_fn); | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& noise_ = unpack(noise, "noise", 2); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RreluWithNoiseBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
auto ret = as_variable(baseType->rrelu_with_noise_backward(grad_output_, self_, noise_, lower, upper, training)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, noise )) { | |
jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward", { grad_output, self, noise }, { ret } ); | |
setattr(n, jit::stringToSymbol("lower"), lower); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("training"), training); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& noise_ = unpack(noise, "noise", 1); | |
check_inplace(self); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<RreluWithNoiseBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
baseType->rrelu_with_noise_forward_(self_, noise_, lower, upper, training, generator); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("softmax"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftmaxBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftmaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->softmax_forward(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softmax", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<SoftmaxBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftmaxBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->softmax_backward(grad_output_, self_, dim, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "softmax_backward", { grad_output, self, output }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const { | |
profiler::RecordFunction profiler("softplus"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftplusBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftplusBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->beta = beta; | |
grad_fn->threshold = threshold; | |
} | |
auto ret = as_variable(baseType->softplus_forward(self_, beta, threshold)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softplus", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
} | |
if (grad_fn) { | |
auto& output = ret; | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
profiler::RecordFunction profiler("softplus_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 4); | |
std::shared_ptr<SoftplusBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftplusBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->beta = beta; | |
grad_fn->threshold = threshold; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->softplus_backward(grad_output_, self_, beta, threshold, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "softplus_backward", { grad_output, self, output }, { ret } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::softshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftshrinkBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftshrinkBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto ret = as_variable(baseType->softshrink_forward(self_, lambd)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softshrink", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<SoftshrinkBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SoftshrinkBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto ret = as_variable(baseType->softshrink_backward(grad_output_, self_, lambd)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softshrink_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::threshold(const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ThresholdBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThresholdBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
auto ret = as_variable(baseType->threshold_forward(self_, threshold, value)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ThresholdBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThresholdBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
auto ret = as_variable(baseType->threshold_backward(grad_output_, self_, threshold, value)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::threshold_(Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ThresholdBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThresholdBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
baseType->threshold_forward_(self_, threshold, value); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
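// The pooling section starts here. The average-pooling forwards record
// output_size as a trace attribute; their backwards save grad_output plus
// only lightweight metadata about self (see self_info below) rather than the
// input tensor itself.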
Tensor VariableType::adaptive_avg_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveAvgPool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveAvgPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->adaptive_avg_pool2d_forward(self_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AdaptiveAvgPool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveAvgPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
    grad_fn->self_info = self;  // captures only type and size metadata about self, not the tensor data | |
} | |
auto ret = as_variable(baseType->adaptive_avg_pool2d_backward(grad_output_, self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_backward", { grad_output, self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::adaptive_avg_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveAvgPool3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveAvgPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->adaptive_avg_pool3d_forward(self_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AdaptiveAvgPool3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveAvgPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->adaptive_avg_pool3d_backward(grad_output_, self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_backward", { grad_output, self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
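// Pooling ops that also return indices save them for backward only after the
// trace is recorded, via SavedVariable(indices, true); the second argument
// appears to mark the saved tensor as an output of this node. Only the first
// tuple element (the values) receives gradient history; the indices are
// non-differentiable.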
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveMaxPool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveMaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->adaptive_max_pool2d_forward(self_, output_size)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
auto& indices = std::get<1>(ret); | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<AdaptiveMaxPool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveMaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->adaptive_max_pool2d_backward(grad_output_, self_, indices_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_backward", { grad_output, self, indices }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveMaxPool3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveMaxPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->adaptive_max_pool3d_forward(self_, output_size)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
auto& indices = std::get<1>(ret); | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<AdaptiveMaxPool3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AdaptiveMaxPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->adaptive_max_pool3d_backward(grad_output_, self_, indices_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_backward", { grad_output, self, indices }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
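// For average pooling, the backward node captures the call's scalar and IntList
// arguments directly. self_info presumably records only the size/type metadata
// of self that the double-backward formula needs, rather than the tensor itself.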
Tensor VariableType::avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AvgPool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AvgPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
} | |
auto ret = as_variable(baseType->avg_pool2d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AvgPool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AvgPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->avg_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AvgPool3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AvgPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
} | |
auto ret = as_variable(baseType->avg_pool3d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool3d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AvgPool3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<AvgPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->avg_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return Tensor(std::move(ret)); | |
} | |
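// fractional_max_pool2d treats random_samples as a non-differentiable input:
// check_no_requires_grad() rejects it up front, and it is excluded from both
// the requires_grad computation and next_functions.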
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& random_samples_ = unpack(random_samples, "random_samples", 3); | |
check_no_requires_grad(random_samples, "random_samples"); | |
std::shared_ptr<FractionalMaxPool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FractionalMaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->fractional_max_pool2d_forward(self_, kernel_size, output_size, random_samples_)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self, random_samples )) { | |
jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d", { self, random_samples }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
auto& indices = std::get<1>(ret); | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 4); | |
std::shared_ptr<FractionalMaxPool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<FractionalMaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->fractional_max_pool2d_backward(grad_output_, self_, kernel_size, output_size, indices_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_backward", { grad_output, self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
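// max_pool2d/3d mirror the adaptive variants but additionally record stride,
// padding, dilation and ceil_mode both on the backward node and as attributes
// of the trace node.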
std::tuple<Tensor,Tensor> VariableType::max_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxPool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->ceil_mode = ceil_mode; | |
} | |
auto ret = as_variable(baseType->max_pool2d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool2d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
if (grad_fn) { | |
auto& indices = std::get<1>(ret); | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 7); | |
std::shared_ptr<MaxPool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->max_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool2d_backward", { grad_output, self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxPool3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->ceil_mode = ceil_mode; | |
} | |
auto ret = as_variable(baseType->max_pool3d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode)); | |
set_history(std::get<0>(ret), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool3d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
if (grad_fn) { | |
auto& indices = std::get<1>(ret); | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
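// No double-backward formula exists for max_pool3d_backward, so an Error node
// is installed in place of a real Backward; it raises the message below if the
// autograd graph is ever differentiated through this result.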
Tensor VariableType::max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 7); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, indices }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for max_pool3d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
} | |
auto ret = as_variable(baseType->max_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool3d_backward", { grad_output, self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return Tensor(std::move(ret)); | |
} | |
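// For unpooling, indices are an input (unpacked with unpack_long) rather than
// an output of the op, so they are saved with the second SavedVariable argument
// set to false.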
Tensor VariableType::max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<MaxUnpool2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxUnpool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->max_unpool2d_forward(self_, indices_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool2d", { self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<MaxUnpool2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxUnpool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->max_unpool2d_backward(grad_output_, self_, indices_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_backward", { grad_output, self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<MaxUnpool3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<MaxUnpool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->max_unpool3d_forward(self_, indices_, output_size, stride, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool3d", { self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, indices }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for max_unpool3d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
} | |
auto ret = as_variable(baseType->max_unpool3d_backward(grad_output_, self_, indices_, output_size, stride, padding)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_backward", { grad_output, self, indices }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
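// The reflection/replication padding family: the forward saves self and the
// padding, while the corresponding *_backward wrappers keep only the padding
// plus self_info, since their double backward apparently does not need the
// original input values.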
Tensor VariableType::reflection_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReflectionPad1DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReflectionPad1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->reflection_pad1d_forward(self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReflectionPad1DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReflectionPad1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->reflection_pad1d_backward(grad_output_, self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::reflection_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReflectionPad2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReflectionPad2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->reflection_pad2d_forward(self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReflectionPad2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReflectionPad2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->reflection_pad2d_backward(grad_output_, self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad1DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->replication_pad1d_forward(self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad1d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad1DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->replication_pad1d_backward(grad_output_, self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->replication_pad2d_forward(self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->replication_pad2d_backward(grad_output_, self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad3d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->replication_pad3d_forward(self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad3d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ReplicationPad3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto ret = as_variable(baseType->replication_pad3d_backward(grad_output_, self_, padding)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
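// The linear/bilinear/trilinear upsampling forwards store only self.sizes() and
// the output size: the gradient depends on the interpolation geometry, not on
// the input values, so saving the full tensor is unnecessary.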
Tensor VariableType::upsample_linear1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleLinear1DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleLinear1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_linear1d_forward(self_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleLinear1DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleLinear1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_linear1d_backward(grad_output_, output_size, input_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_backward", { grad_output }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_bilinear2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleBilinear2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleBilinear2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_bilinear2d_forward(self_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleBilinear2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleBilinear2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_bilinear2d_backward(grad_output_, output_size, input_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_backward", { grad_output }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_trilinear3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleTrilinear3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleTrilinear3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_trilinear3d_forward(self_, output_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleTrilinear3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleTrilinear3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto ret = as_variable(baseType->upsample_trilinear3d_backward(grad_output_, output_size, input_size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_backward", { grad_output }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
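// The nearest-neighbour variants take an integer scale_factor instead of an
// output_size and save the whole input tensor; the underlying THNN backward
// presumably needs it to recover the input geometry.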
Tensor VariableType::upsample_nearest1d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest1DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest1d_forward(self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_nearest1d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest1DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest1d_backward(grad_output_, self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_nearest2d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest2d_forward(self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_nearest2d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest2d_backward(grad_output_, self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_nearest3d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest3d_forward(self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::upsample_nearest3d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UpsampleNearest3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto ret = as_variable(baseType->upsample_nearest3d_backward(grad_output_, self_, scale_factor)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_backward", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return Tensor(std::move(ret)); | |
} | |
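// _sigmoid and _tanh are not handled by this type (the forwards below throw),
// but their backward entries are still wrapped so that double backward through
// sigmoid/tanh works; both save the op's output and the incoming grad_output.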
Tensor VariableType::_sigmoid(const Tensor & self) const { | |
throw std::runtime_error("VariableType::_sigmoid NYI"); | |
} | |
Tensor VariableType::_sigmoid_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<SigmoidBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SigmoidBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->_sigmoid_backward(grad_output_, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sigmoid_backward", { grad_output, output }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_tanh(const Tensor & self) const { | |
throw std::runtime_error("VariableType::_tanh NYI"); | |
} | |
Tensor VariableType::_tanh_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_tanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<TanhBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<TanhBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto ret = as_variable(baseType->_tanh_backward(grad_output_, output_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_tanh_backward", { grad_output, output }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
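// thnn_batch_norm allocates the save_mean/save_std buffers up front with
// tensor() and hands the raw at::Tensor (via VariableImpl::data) to the base
// kernel to fill in; the Variable wrappers are then saved on the backward node
// alongside the usual inputs. running_mean/running_var must not require grad.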
Tensor VariableType::thnn_batch_norm(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
profiler::RecordFunction profiler("thnn_batch_norm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto weight_ = unpack_opt(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
auto save_mean = tensor(); | |
auto& save_mean_ = static_cast<VariableImpl*>(save_mean.get())->data; | |
auto save_std = tensor(); | |
auto& save_std_ = static_cast<VariableImpl*>(save_std.get())->data; | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<ThnnBatchNormBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnBatchNormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->eps = eps; | |
grad_fn->save_mean_ = SavedVariable(save_mean, false); | |
grad_fn->save_std_ = SavedVariable(save_std, false); | |
} | |
auto ret = as_variable(baseType->thnn_batch_norm_forward(self_, weight_, bias_, running_mean_, running_var_, training, momentum, eps, save_mean_, save_std_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm", { self, weight, bias, running_mean, running_var }, { ret } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("momentum"), momentum); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, bool training, double eps, const Tensor & save_mean, const Tensor & save_std, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_batch_norm_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
auto& save_mean_ = unpack(save_mean, "save_mean", 7); | |
auto& save_std_ = unpack(save_std, "save_std", 8); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<ThnnBatchNormBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight, save_mean, save_std }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnBatchNormBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, save_mean, save_std }); | |
grad_fn->save_mean_ = SavedVariable(save_mean, false); | |
grad_fn->save_std_ = SavedVariable(save_std, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->eps = eps; | |
} | |
auto ret = as_variable(baseType->thnn_batch_norm_backward(grad_output_, self_, weight_, running_mean_, running_var_, training, eps, save_mean_, save_std_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, running_mean, running_var, save_mean, save_std )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_backward", { grad_output, self, weight, running_mean, running_var, save_mean, save_std }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
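// The THNN convolution wrappers follow the same workspace-buffer pattern
// (columns/ones, or finput/fgrad_input) as batch norm above. output_mask is
// forwarded to the base kernel and also recorded on the trace node through the
// std::array<bool, N> setattr overload.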
Tensor VariableType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
auto columns = tensor(); | |
auto& columns_ = static_cast<VariableImpl*>(columns.get())->data; | |
auto ones = tensor(); | |
auto& ones_ = static_cast<VariableImpl*>(ones.get())->data; | |
std::shared_ptr<ThnnConvTranspose2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvTranspose2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
grad_fn->columns_ = SavedVariable(columns, false); | |
grad_fn->ones_ = SavedVariable(ones, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv_transpose2d_forward(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation, columns_, ones_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 8); | |
auto& ones_ = unpack(ones, "ones", 9); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvTranspose2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvTranspose2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_transpose2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, output_padding, dilation, columns_, ones_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
auto finput = tensor(); | |
auto& finput_ = static_cast<VariableImpl*>(finput.get())->data; | |
auto fgrad_input = tensor(); | |
auto& fgrad_input_ = static_cast<VariableImpl*>(fgrad_input.get())->data; | |
std::shared_ptr<ThnnConvTranspose3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvTranspose3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
grad_fn->finput_ = SavedVariable(finput, false); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv_transpose3d_forward(self_, weight_, bias_, stride, padding, output_padding, dilation, finput_, fgrad_input_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 7); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 8); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConvTranspose3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvTranspose3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_transpose3d_backward(grad_output_, self_, weight_, stride, padding, output_padding, dilation, finput_, fgrad_input_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
auto finput = tensor(); | |
auto& finput_ = static_cast<VariableImpl*>(finput.get())->data; | |
auto fgrad_input = tensor(); | |
auto& fgrad_input_ = static_cast<VariableImpl*>(fgrad_input.get())->data; | |
std::shared_ptr<ThnnConv2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConv2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->finput_ = SavedVariable(finput, false); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv2d_forward(self_, weight_, kernel_size, bias_, stride, padding, finput_, fgrad_input_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 6); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 7); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConv2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConv2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->thnn_conv2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
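// Unlike the other thnn_conv_* wrappers above, thnn_conv_depthwise2d has no
// scratch buffers, so no temporary columns/ones or finput/fgrad_input
// variables are allocated; the wrapper only saves the inputs and the
// convolution parameters on the backward node.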
Tensor VariableType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConvDepthwise2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDepthwise2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_depthwise2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return Tensor(std::move(ret)); | |
} | |
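// Note the narrower return type here: the depthwise backward produces only
// grad_input and grad_weight (a 2-element output_mask), presumably because the
// bias gradient is handled outside this kernel.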
std::tuple<Tensor,Tensor> VariableType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<ThnnConvDepthwise2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDepthwise2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_depthwise2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_backward", { grad_output, self, weight }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv3d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
auto finput = tensor(); | |
auto& finput_ = static_cast<VariableImpl*>(finput.get())->data; | |
auto fgrad_input = tensor(); | |
auto& fgrad_input_ = static_cast<VariableImpl*>(fgrad_input.get())->data; | |
std::shared_ptr<ThnnConv3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConv3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->finput_ = SavedVariable(finput, false); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv3d_forward(self_, weight_, kernel_size, bias_, stride, padding, finput_, fgrad_input_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 6); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 7); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConv3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConv3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
auto ret = as_variable(baseType->thnn_conv3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
auto columns = tensor(); | |
auto& columns_ = static_cast<VariableImpl*>(columns.get())->data; | |
auto ones = tensor(); | |
auto& ones_ = static_cast<VariableImpl*>(ones.get())->data; | |
std::shared_ptr<ThnnConvDilated2DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDilated2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->columns_ = SavedVariable(columns, false); | |
grad_fn->ones_ = SavedVariable(ones, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv_dilated2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation, columns_, ones_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 7); | |
auto& ones_ = unpack(ones, "ones", 8); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvDilated2DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDilated2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_dilated2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
auto columns = tensor(); | |
auto& columns_ = static_cast<VariableImpl*>(columns.get())->data; | |
auto ones = tensor(); | |
auto& ones_ = static_cast<VariableImpl*>(ones.get())->data; | |
std::shared_ptr<ThnnConvDilated3DBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDilated3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->columns_ = SavedVariable(columns, false); | |
grad_fn->ones_ = SavedVariable(ones, false); | |
} | |
auto ret = as_variable(baseType->thnn_conv_dilated3d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation, columns_, ones_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 7); | |
auto& ones_ = unpack(ones, "ones", 8); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvDilated3DBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad_output, self, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ThnnConvDilated3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
auto ret = as_variable(baseType->thnn_conv_dilated3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
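// From here on, several wrappers (adaptive_avg_pool1d, adaptive_max_pool1d,
// batch_norm, chunk, convolution, ...) simply forward to the Type:: base
// implementation, which presumably composes other differentiable ops that are
// dispatched back through VariableType; no grad_fn is built at this level, and
// at most a JIT trace node is recorded when tracing is active.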
Tensor VariableType::adaptive_avg_pool1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool1d"); | |
auto ret = Type::adaptive_avg_pool1d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool1d", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool1d"); | |
auto ret = Type::adaptive_max_pool1d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool1d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
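// allclose returns a plain bool, so there is nothing to trace or attach
// history to; the wrapper just unpacks the underlying tensors and forwards the
// call to the base type.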
bool VariableType::allclose(const Tensor & self, const Tensor & other, double rtol, double atol) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return baseType->allclose(self_, other_, rtol, atol); | |
} | |
Tensor VariableType::batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) const { | |
profiler::RecordFunction profiler("batch_norm"); | |
auto ret = Type::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); | |
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "batch_norm", { input, weight, bias, running_mean, running_var }, { ret } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("momentum"), momentum); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
} | |
return Tensor(std::move(ret)); | |
} | |
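// The two bernoulli_ overloads are in-place: they run through Type::bernoulli_
// and return `self` directly, and no trace node is recorded for them here.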
Tensor & VariableType::bernoulli_(Tensor & self, const Tensor & p, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli_"); | |
auto ret = Type::bernoulli_(self, p, generator); | |
return self; | |
} | |
Tensor & VariableType::bernoulli_(Tensor & self, double p, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli_"); | |
auto ret = Type::bernoulli_(self, p, generator); | |
return self; | |
} | |
std::vector<Tensor> VariableType::chunk(const Tensor & self, int64_t chunks, int64_t dim) const { | |
profiler::RecordFunction profiler("chunk"); | |
auto ret = Type::chunk(self, chunks, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "chunk", { self }, cast_tensor_list(ret) ); | |
setattr(n, jit::stringToSymbol("chunks"), chunks); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return ret; | |
} | |
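// convolution is a thin delegating wrapper; _convolution below is the variant
// the tracer actually records, with the full set of flags (transposed, groups,
// benchmark, deterministic, cudnn_enabled) stored as node attributes.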
Tensor VariableType::convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups) const { | |
profiler::RecordFunction profiler("convolution"); | |
auto ret = Type::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) const { | |
profiler::RecordFunction profiler("_convolution"); | |
auto ret = Type::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution", { input, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding) const { | |
profiler::RecordFunction profiler("_convolution_nogroup"); | |
auto ret = Type::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution_nogroup", { input, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
} | |
return Tensor(std::move(ret)); | |
} | |
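// For ops whose derivative is not implemented, the generated code installs an
// Error node as grad_fn (see _convolution_double_backward here and
// conv_tbc_backward below): the forward call still works, but backpropagating
// through its outputs is expected to raise the stored message.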
std::tuple<Tensor,Tensor,Tensor> VariableType::_convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("_convolution_double_backward"); | |
auto ggI_ = unpack_opt(ggI, "ggI", 0); | |
auto ggW_ = unpack_opt(ggW, "ggW", 1); | |
auto ggb_ = unpack_opt(ggb, "ggb", 2); | |
auto& gO_ = unpack(gO, "gO", 3); | |
auto& weight_ = unpack(weight, "weight", 4); | |
auto& self_ = unpack(self, "self", 5); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ ggI, ggW, ggb, gO, weight, self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for _convolution_double_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ ggI, ggW, ggb, gO, weight, self }); | |
} | |
auto ret = as_variable(baseType->_convolution_double_backward(ggI_, ggW_, ggb_, gO_, weight_, self_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( ggI, ggW, ggb, gO, weight, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution_double_backward", { ggI, ggW, ggb, gO, weight, self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv1d"); | |
auto ret = Type::conv1d(input, weight, bias, stride, padding, dilation, groups); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv2d"); | |
auto ret = Type::conv2d(input, weight, bias, stride, padding, dilation, groups); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv3d"); | |
auto ret = Type::conv3d(input, weight, bias, stride, padding, dilation, groups); | |
return Tensor(std::move(ret)); | |
} | |
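// conv_tbc, by contrast, has a real derivative: ConvTbcBackward saves self,
// weight, bias and the pad argument so the backward pass can be computed.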
Tensor VariableType::conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) const { | |
profiler::RecordFunction profiler("conv_tbc"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto& bias_ = unpack(bias, "bias", 2); | |
std::shared_ptr<ConvTbcBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ConvTbcBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->bias_ = SavedVariable(bias, false); | |
grad_fn->pad = pad; | |
} | |
auto ret = as_variable(baseType->conv_tbc(self_, weight_, bias_, pad)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "conv_tbc", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("pad"), pad); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) const { | |
profiler::RecordFunction profiler("conv_tbc_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input_ = unpack(input, "input", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& bias_ = unpack(bias, "bias", 3); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, input, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for conv_tbc_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, input, weight, bias }); | |
} | |
auto ret = as_variable(baseType->conv_tbc_backward(self_, input_, weight_, bias_, pad)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "conv_tbc_backward", { self, input, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("pad"), pad); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose1d"); | |
auto ret = Type::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose2d"); | |
auto ret = Type::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose3d"); | |
auto ret = Type::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) const { | |
profiler::RecordFunction profiler("cudnn_affine_grid_generator"); | |
auto& theta_ = unpack(theta, "theta", 0); | |
std::shared_ptr<CudnnAffineGridGeneratorBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ theta }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnAffineGridGeneratorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ theta }); | |
grad_fn->N = N; | |
grad_fn->C = C; | |
grad_fn->H = H; | |
grad_fn->W = W; | |
} | |
auto ret = as_variable(baseType->cudnn_affine_grid_generator(theta_, N, C, H, W)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( theta )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator", { theta }, { ret } ); | |
setattr(n, jit::stringToSymbol("N"), N); | |
setattr(n, jit::stringToSymbol("C"), C); | |
setattr(n, jit::stringToSymbol("H"), H); | |
setattr(n, jit::stringToSymbol("W"), W); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) const { | |
profiler::RecordFunction profiler("cudnn_affine_grid_generator_backward"); | |
auto& grad_ = unpack(grad, "grad", 0); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ grad }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for cudnn_affine_grid_generator_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad }); | |
} | |
auto ret = as_variable(baseType->cudnn_affine_grid_generator_backward(grad_, N, C, H, W)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( grad )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator_backward", { grad }, { ret } ); | |
setattr(n, jit::stringToSymbol("N"), N); | |
setattr(n, jit::stringToSymbol("C"), C); | |
setattr(n, jit::stringToSymbol("H"), H); | |
setattr(n, jit::stringToSymbol("W"), W); | |
} | |
return Tensor(std::move(ret)); | |
} | |
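// cudnn_batch_norm saves its second and third outputs (which
// cudnn_batch_norm_backward receives as save_mean/save_var) on the backward
// node after the call, using SavedVariable(..., true); the boolean presumably
// marks them as outputs of this node rather than inputs.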
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) const { | |
profiler::RecordFunction profiler("cudnn_batch_norm"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<CudnnBatchNormBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ input, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnBatchNormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, weight, bias }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->epsilon = epsilon; | |
} | |
auto ret = as_variable(baseType->cudnn_batch_norm(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm", { input, weight, bias, running_mean, running_var }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("exponential_average_factor"), exponential_average_factor); | |
setattr(n, jit::stringToSymbol("epsilon"), epsilon); | |
} | |
if (grad_fn) { | |
auto& result1 = std::get<1>(ret); | |
grad_fn->result1_ = SavedVariable(result1, true); | |
auto& result2 = std::get<2>(ret); | |
grad_fn->result2_ = SavedVariable(result2, true); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) const { | |
profiler::RecordFunction profiler("cudnn_batch_norm_backward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
auto save_mean_ = unpack_opt(save_mean, "save_mean", 5); | |
auto save_var_ = unpack_opt(save_var, "save_var", 6); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<CudnnBatchNormBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ input, grad_output, weight, save_mean, save_var }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnBatchNormBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, grad_output, weight, save_mean, save_var }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->save_mean_ = SavedVariable(save_mean, false); | |
grad_fn->save_var_ = SavedVariable(save_var, false); | |
grad_fn->epsilon = epsilon; | |
} | |
auto ret = as_variable(baseType->cudnn_batch_norm_backward(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( input, grad_output, weight, running_mean, running_var, save_mean, save_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm_backward", { input, grad_output, weight, running_mean, running_var, save_mean, save_var }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("epsilon"), epsilon); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
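// The cudnn_convolution family stores every flag the cuDNN call depends on
// (padding, stride, dilation, groups, benchmark, deterministic) on the
// backward node, matching the arguments that cudnn_convolution_backward takes.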
Tensor VariableType::cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<CudnnConvolutionBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnConvolutionBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto ret = as_variable(baseType->cudnn_convolution(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_backward_input(IntList self_size, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_input"); | |
auto ret = Type::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_input", { grad_output, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("self_size"), self_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<CudnnConvolutionBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, grad_output, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnConvolutionBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto ret = as_variable(baseType->cudnn_convolution_backward(self_, grad_output_, weight_, padding, stride, dilation, groups, benchmark, deterministic, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward", { self, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
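// When a traced op has no scalar attributes (cudnn_convolution_backward_bias,
// cudnn_grid_sampler, det, ...), the generated trace block still declares the
// node and silences the unused-variable warning with `(void)n;`.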
Tensor VariableType::cudnn_convolution_backward_bias(const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_bias"); | |
auto ret = Type::cudnn_convolution_backward_bias(grad_output); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_bias", { grad_output }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_weight"); | |
auto ret = Type::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_weight", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<CudnnConvolutionTransposeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnConvolutionTransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto ret = as_variable(baseType->cudnn_convolution_transpose(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose", { self, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<CudnnConvolutionTransposeBackwardBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, grad_output, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnConvolutionTransposeBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto ret = as_variable(baseType->cudnn_convolution_transpose_backward(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward", { self, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_bias(const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_bias"); | |
auto ret = Type::cudnn_convolution_transpose_backward_bias(grad_output); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_bias", { grad_output }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_input"); | |
auto ret = Type::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_input", { grad_output, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_weight"); | |
auto ret = Type::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_weight", { grad_output, self }, { ret } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::cudnn_grid_sampler(const Tensor & self, const Tensor & grid) const { | |
profiler::RecordFunction profiler("cudnn_grid_sampler"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grid_ = unpack(grid, "grid", 1); | |
std::shared_ptr<CudnnGridSamplerBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, grid }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CudnnGridSamplerBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grid }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grid_ = SavedVariable(grid, false); | |
} | |
auto ret = as_variable(baseType->cudnn_grid_sampler(self_, grid_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self, grid )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler", { self, grid }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
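// When a double backward is not implemented, the generated code installs an
// Error node as the grad_fn: the first derivative still works, but
// differentiating through this result throws the message given below.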
std::tuple<Tensor,Tensor> VariableType::cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_grid_sampler_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grid_ = unpack(grid, "grid", 1); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 2); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, grid, grad_output }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for cudnn_grid_sampler_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, grid, grad_output }); | |
} | |
auto ret = as_variable(baseType->cudnn_grid_sampler_backward(self_, grid_, grad_output_)); | |
set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self, grid, grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler_backward", { self, grid, grad_output }, { std::get<0>(ret), std::get<1>(ret) } ); | |
(void)n; | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::det(const Tensor & self) const { | |
profiler::RecordFunction profiler("det"); | |
auto ret = Type::det(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "det", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor,Tensor> VariableType::_det_with_svd(const Tensor & self) const { | |
profiler::RecordFunction profiler("_det_with_svd"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DetWithSvdBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<DetWithSvdBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto ret = as_variable(baseType->_det_with_svd(self_)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret), std::get<3>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_det_with_svd", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret), std::get<3>(ret) } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
auto& result0 = std::get<0>(ret); | |
grad_fn->result0_ = SavedVariable(result0, true); | |
auto& result1 = std::get<1>(ret); | |
grad_fn->result1_ = SavedVariable(result1, true); | |
auto& result2 = std::get<2>(ret); | |
grad_fn->result2_ = SavedVariable(result2, true); | |
auto& result3 = std::get<3>(ret); | |
grad_fn->result3_ = SavedVariable(result3, true); | |
} | |
return std::tuple<Tensor,Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
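// View ops such as expand wrap their result with as_view so the output shares
// storage with the base Variable. The backward only needs the original sizes
// (self_sizes) to reduce the expanded gradient back to the input shape.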
Tensor VariableType::expand(const Tensor & self, IntList size) const { | |
profiler::RecordFunction profiler("expand"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ExpandBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<ExpandBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->expand(self_, size)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expand", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::expand_as(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("expand_as"); | |
auto ret = Type::expand_as(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "expand_as", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::index(const Tensor & self, TensorList indices) const { | |
profiler::RecordFunction profiler("index"); | |
auto ret = Type::index(self, indices); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "index", flatten( self, indices ), { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::index_put_(Tensor & self, TensorList indices, const Tensor & values) const { | |
profiler::RecordFunction profiler("index_put_"); | |
auto ret = Type::index_put_(self, indices, values); | |
if (jit::tracer::isTracing( self, indices, values )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_put", flatten( self, indices, values ), { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
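// Non-differentiable queries (is_cuda, is_distributed, size, stride, ...)
// simply unpack the underlying tensor and forward to baseType; they record no
// profiler scope, autograd history, or trace node.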
bool VariableType::is_cuda(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_cuda(self_); | |
} | |
bool VariableType::is_distributed(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_distributed(self_); | |
} | |
bool VariableType::is_nonzero(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_nonzero(self_); | |
} | |
bool VariableType::is_same_size(const Tensor & self, const Tensor & other) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
return baseType->is_same_size(self_, other_); | |
} | |
bool VariableType::is_signed(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_signed(self_); | |
} | |
bool VariableType::is_sparse(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->is_sparse(self_); | |
} | |
Tensor VariableType::matmul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("matmul"); | |
auto ret = Type::matmul(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "matmul", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool1d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool1d"); | |
auto ret = Type::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool1d", { self }, { std::get<0>(ret), std::get<1>(ret) } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return std::tuple<Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) const { | |
profiler::RecordFunction profiler("narrow"); | |
auto ret = Type::narrow(self, dim, start, length); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "narrow", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("start"), start); | |
setattr(n, jit::stringToSymbol("length"), length); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<NnpackSpatialConvolutionBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ input, weight, bias }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<NnpackSpatialConvolutionBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, weight, bias }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kW = kW; | |
grad_fn->kH = kH; | |
grad_fn->padW = padW; | |
grad_fn->padH = padH; | |
grad_fn->weight_sizes = weight.sizes(); | |
} | |
auto ret = as_variable(baseType->nnpack_spatial_convolution(input_, weight_, bias_, kW, kH, padW, padH)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution", { input, weight, bias }, { ret } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ input, grad_output, weight }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for nnpack_spatial_convolution_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ input, grad_output, weight }); | |
} | |
auto ret = as_variable(baseType->nnpack_spatial_convolution_backward(input_, grad_output_, weight_, kW, kH, padW, padH, output_mask)); | |
set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn); | |
if (jit::tracer::isTracing( input, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward", { input, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::tuple<Tensor,Tensor,Tensor>(std::move(ret)); | |
} | |
Tensor VariableType::nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_input"); | |
auto ret = Type::nnpack_spatial_convolution_backward_input(input, grad_output, weight, kW, kH, padW, padH); | |
if (jit::tracer::isTracing( input, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_input", { input, grad_output, weight }, { ret } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::nnpack_spatial_convolution_backward_weight(const Tensor & input, IntList weight_size, const Tensor & grad_output, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_weight"); | |
auto ret = Type::nnpack_spatial_convolution_backward_weight(input, weight_size, grad_output, kW, kH, padW, padH); | |
if (jit::tracer::isTracing( input, grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_weight", { input, grad_output }, { ret } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::permute(const Tensor & self, IntList dims) const { | |
profiler::RecordFunction profiler("permute"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PermuteBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<PermuteBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dims = dims; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->permute(self_, dims)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "permute", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dims"), dims); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::pin_memory(const Tensor & self) const { | |
profiler::RecordFunction profiler("pin_memory"); | |
auto ret = Type::pin_memory(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pin_memory", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::RoiPooling2d_backward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const Tensor & gradOutput, const Tensor & argmaxes) const { | |
profiler::RecordFunction profiler("RoiPooling2d_backward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& rois_ = unpack(rois, "rois", 1); | |
auto& gradOutput_ = unpack(gradOutput, "gradOutput", 5); | |
auto& argmaxes_ = unpack(argmaxes, "argmaxes", 6); | |
std::shared_ptr<Error> grad_fn; | |
auto requires_grad = compute_requires_grad({ input, rois, gradOutput, argmaxes }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<Error>("the derivative for RoiPooling2d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ input, rois, gradOutput, argmaxes }); | |
} | |
auto ret = as_variable(baseType->RoiPooling2d_backward(input_, rois_, pooledHeight, pooledWidth, spatialScale, gradOutput_, argmaxes_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( input, rois, gradOutput, argmaxes )) { | |
jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_backward", { input, rois, gradOutput, argmaxes }, { ret } ); | |
setattr(n, jit::stringToSymbol("pooledHeight"), pooledHeight); | |
setattr(n, jit::stringToSymbol("pooledWidth"), pooledWidth); | |
setattr(n, jit::stringToSymbol("spatialScale"), spatialScale); | |
} | |
return Tensor(std::move(ret)); | |
} | |
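// Note: rrelu and rrelu_ record no trace node; ops taking a Generator*
// appear to be skipped by the tracer codegen.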
Tensor VariableType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu"); | |
auto ret = Type::rrelu(self, lower, upper, training, generator); | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_"); | |
auto ret = Type::rrelu_(self, lower, upper, training, generator); | |
return self; | |
} | |
Tensor VariableType::select(const Tensor & self, int64_t dim, int64_t index) const { | |
profiler::RecordFunction profiler("select"); | |
auto ret = Type::select(self, dim, index); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "select", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("index"), index); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::selu(const Tensor & self) const { | |
profiler::RecordFunction profiler("selu"); | |
auto ret = Type::selu(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::selu_(Tensor & self) const { | |
profiler::RecordFunction profiler("selu_"); | |
auto ret = Type::selu_(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
int64_t VariableType::size(const Tensor & self, int64_t dim) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->size(self_, dim); | |
} | |
Tensor VariableType::slice(const Tensor & self, int64_t start, int64_t end, int64_t step, int64_t dim) const { | |
profiler::RecordFunction profiler("slice"); | |
auto ret = Type::slice(self, start, end, step, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "slice", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("start"), start); | |
setattr(n, jit::stringToSymbol("end"), end); | |
setattr(n, jit::stringToSymbol("step"), step); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
std::vector<Tensor> VariableType::split(const Tensor & self, int64_t split_size, int64_t dim) const { | |
profiler::RecordFunction profiler("split"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SplitBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SplitBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->split_size = split_size; | |
grad_fn->dim = dim; | |
} | |
auto ret = as_variable(baseType->split(self_, split_size, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "split", { self }, ret ); | |
setattr(n, jit::stringToSymbol("split_size"), split_size); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return as_tensor_list(ret); | |
} | |
Tensor VariableType::squeeze(const Tensor & self) const { | |
profiler::RecordFunction profiler("squeeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqueezeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqueezeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->squeeze(self_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::squeeze(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("squeeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqueezeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqueezeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_argsize_dim = self.size(dim); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->squeeze(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
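// In-place variants follow a common pattern: check_inplace rejects tensors
// that cannot be modified in place (typically leaf Variables that require
// grad), the base op mutates self_, increment_version bumps the version
// counter so stale SavedVariables can be detected, and set_history attaches
// the new grad_fn to self.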
Tensor & VariableType::squeeze_(Tensor & self) const { | |
profiler::RecordFunction profiler("squeeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqueezeBackward0> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqueezeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
baseType->squeeze_(self_); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::squeeze_(Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("squeeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqueezeBackward1> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SqueezeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_argsize_dim = self.size(dim); | |
grad_fn->dim = dim; | |
} | |
baseType->squeeze_(self_, dim); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::stack(TensorList tensors, int64_t dim) const { | |
profiler::RecordFunction profiler("stack"); | |
auto ret = Type::stack(tensors, dim); | |
if (jit::tracer::isTracing( tensors )) { | |
jit::Node *n = jit::tracer::recordTrace( "stack", flatten( tensors ), { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::stft(const Tensor & self, int64_t frame_length, int64_t hop, int64_t fft_size, bool return_onesided, const Tensor & window, int64_t pad_end) const { | |
profiler::RecordFunction profiler("stft"); | |
auto ret = Type::stft(self, frame_length, hop, fft_size, return_onesided, window, pad_end); | |
if (jit::tracer::isTracing( self, window )) { | |
jit::Node *n = jit::tracer::recordTrace( "stft", { self, window }, { ret } ); | |
setattr(n, jit::stringToSymbol("frame_length"), frame_length); | |
setattr(n, jit::stringToSymbol("hop"), hop); | |
setattr(n, jit::stringToSymbol("fft_size"), fft_size); | |
setattr(n, jit::stringToSymbol("return_onesided"), return_onesided); | |
setattr(n, jit::stringToSymbol("pad_end"), pad_end); | |
} | |
return Tensor(std::move(ret)); | |
} | |
int64_t VariableType::stride(const Tensor & self, int64_t dim) const { | |
auto& self_ = unpack(self, "self", 0); | |
return baseType->stride(self_, dim); | |
} | |
Tensor VariableType::type_as(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("type_as"); | |
auto ret = Type::type_as(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "type_as", { self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::unsqueeze(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("unsqueeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UnsqueezeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UnsqueezeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
auto ret = as_view(static_cast<const Variable&>(self), baseType->unsqueeze(self_, dim)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { ret } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor & VariableType::unsqueeze_(Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("unsqueeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<UnsqueezeBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<UnsqueezeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
baseType->unsqueeze_(self_, dim); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(static_cast<Variable&>(self), grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::where(const Tensor & condition, const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("where"); | |
auto ret = Type::where(condition, self, other); | |
if (jit::tracer::isTracing( condition, self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "where", { condition, self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
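// _s_where is presumably the non-broadcasting ("same-size") kernel behind
// where; the condition is unpacked as a ByteTensor via unpack_byte, and the
// grad_fn saves it so the backward can route gradients to self or other.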
Tensor VariableType::_s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("_s_where"); | |
auto& condition_ = unpack_byte(condition, "condition", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& other_ = unpack(other, "other", 2); | |
std::shared_ptr<SWhereBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ condition, self, other }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<SWhereBackward>(); | |
grad_fn->next_functions = compute_next_functions({ condition, self, other }); | |
grad_fn->condition_info = condition; | |
grad_fn->condition_ = SavedVariable(condition, false); | |
} | |
auto ret = as_variable(baseType->_s_where(condition_, self_, other_)); | |
set_history(ret, grad_fn); | |
if (jit::tracer::isTracing( condition, self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "_s_where", { condition, self, other }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
profiler::RecordFunction profiler("_standard_gamma_grad"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<StandardGammaGradBackward> grad_fn; | |
auto requires_grad = compute_requires_grad({ self }); | |
if (requires_grad) { | |
grad_fn = std::make_shared<StandardGammaGradBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto ret = as_variable(baseType->_standard_gamma_grad(self_, output_)); | |
set_history({ ret }, grad_fn); | |
if (jit::tracer::isTracing( self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_standard_gamma_grad", { self, output }, { ret } ); | |
(void)n; | |
} | |
return Tensor(std::move(ret)); | |
} | |
}} // namespace torch::autograd |