caveat.models.continuous.auto_attention
AttentionDecoder(input_size, output_size, hidden_size, ffwd_size, num_heads, num_layers, length, dropout=0.0, position_embedding='learnt', sos=0)
Bases: Module
Instance attributes:

- activity_logprob_activation = nn.LogSoftmax(dim=-1)
- blocks = nn.ModuleList([DecoderBlockMAskedSelfAttention(hidden_size, n_head=num_heads, dropout=dropout, block_size=length, ffwd_size=ffwd_size) for _ in range(num_layers)])
- duration_activation = nn.Sigmoid()
- embedding = CustomDurationEmbeddingConcat(input_size, hidden_size, dropout=dropout)
- lm_head = nn.Linear(hidden_size, output_size)
- max_length = length
- output_size = output_size
- position_embedding = LearntPositionalEncoding(d_model=hidden_size, dropout=dropout, length=length)
- sos = sos
forward(target, mask=None)
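For orientation, a minimal construction sketch using the documented signature. All hyperparameter values below are hypothetical, and the encoding expected by `forward(target, mask)` is not documented on this page.

```python
import torch
from caveat.models.continuous.auto_attention import AttentionDecoder

# Hypothetical hyperparameters, for illustration only.
decoder = AttentionDecoder(
    input_size=8,        # assumed number of encoded activity classes
    output_size=8,
    hidden_size=64,
    ffwd_size=256,
    num_heads=4,
    num_layers=2,
    length=16,           # maximum sequence length / attention block size
    dropout=0.1,
    position_embedding="learnt",
)
print(sum(p.numel() for p in decoder.parameters()))  # parameter count sanity check
```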
AttentionHead(head_size, n_embd=10, block_size=128, dropout=0.0)
Bases: Module

One head of self-attention.
Instance attributes:

- dropout = nn.Dropout(dropout)
- key = nn.Linear(n_embd, head_size, bias=False)
- query = nn.Linear(n_embd, head_size, bias=False)
- value = nn.Linear(n_embd, head_size, bias=False)
forward(x, mask=None)
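The key/query/value projections above imply standard scaled dot-product attention. The sketch below re-implements a single head outside the class to show the mechanics; the function name, shapes, and masking convention are assumptions, not the confirmed source.

```python
import torch
import torch.nn.functional as F

def single_head_attention(x, key, query, value, dropout, mask=None):
    # x: [N, L, n_embd]; key/query/value are the nn.Linear projections listed above.
    k = key(x)                                                # [N, L, head_size]
    q = query(x)                                              # [N, L, head_size]
    weights = q @ k.transpose(-2, -1) * k.shape[-1] ** -0.5   # [N, L, L] scaled scores
    if mask is not None:
        weights = weights.masked_fill(mask == 0, float("-inf"))
    weights = dropout(F.softmax(weights, dim=-1))
    return weights @ value(x)                                 # [N, L, head_size]
```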
AutoContAtt(*args, **kwargs)
Bases: Base

Attention-based encoder and decoder with encoder embedding layer.
build(**config)
decode(context, mask, **kwargs)
Decode latent sample to batch of output sequences.

| Parameter | Description |
| --- | --- |
| `z` | Latent space batch [N, latent_dims]. |

| Returns | Description |
| --- | --- |
| `tensor` | Output sequence batch [N, steps, acts]. |
forward(x, target=None, input_mask=None, **kwargs)
Forward pass; also returns the latent parameterization.

| Parameter | Description |
| --- | --- |
| `x` | Input sequences [N, L, Cin]. |

| Returns | Description |
| --- | --- |
| `List[Tensor]` | [Log probs, Probs [N, L, Cout], Input [N, L, Cin], mu [N, latent], var [N, latent]]. |
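A hedged usage sketch of the return structure documented above. Here `model` is assumed to be an already-built `AutoContAtt` instance, and the [N, L, Cin] encoding of `x` is an assumption.

```python
import torch

# model: a built AutoContAtt instance (assumed, e.g. via build(**config))
x = torch.randn(32, 16, 2)                   # assumed [N, L, Cin] batch of encoded schedules
log_probs, probs, xs, mu, var = model.forward(x, target=x)
print(log_probs.shape, mu.shape, var.shape)
```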
infer(x, device, input_mask=None, **kwargs)
Given an encoder input, return reconstructed output and z samples.

| Parameter | Description |
| --- | --- |
| `x` | [N, steps, acts]. |

| Returns | Description |
| --- | --- |
| `Tensor` | (tensor: [N, steps, acts], tensor: [N, latent_dims]). |
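An illustrative call matching the signature and return documented above; the `model` instance and input batch are assumptions.

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model: a built AutoContAtt instance (assumed); x: encoder input batch (assumed)
recon, z = model.infer(x, device=device)
print(recon.shape)   # [N, steps, acts]
print(z.shape)       # [N, latent_dims]
```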
loss_function(log_probs, target, mask, **kwargs)
Loss function for sequence encoding [N, L, 2].
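The exact terms and weights of the loss are not shown on this page. The sketch below illustrates one plausible masked loss for a [N, L, 2] encoding of (activity class, duration) pairs; the function name, the duration term, and the equal weighting are all assumptions.

```python
import torch
import torch.nn.functional as F

def masked_schedule_loss(log_probs, durations, target, mask):
    # log_probs: [N, L, Cout] activity log-probabilities; durations: [N, L] predicted durations;
    # target: [N, L, 2] of (activity index, duration); mask: [N, L] of valid steps.
    acts = target[..., 0].long()
    nll = F.nll_loss(log_probs.transpose(1, 2), acts, reduction="none")   # [N, L]
    mse = (durations - target[..., 1]) ** 2                               # [N, L]
    mask = mask.float()
    return ((nll + mse) * mask).sum() / mask.sum()
```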
predict(z, device, **kwargs)
Given samples from the latent space, return the corresponding decoder space map.

| Parameter | Description |
| --- | --- |
| `z` | [N, latent_dims]. |
| `current_device` | Device to run the model. |

| Returns | Description |
| --- | --- |
| `tensor` | [N, steps, acts]. |
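A sketch of generating new sequences by decoding samples drawn from a standard-normal prior; the latent dimensionality and the `model` instance are assumptions.

```python
import torch

latent_dims = 6                                # assumed latent size
z = torch.randn(64, latent_dims)
# model: a built AutoContAtt instance (assumed)
schedules = model.predict(z, torch.device("cpu"))
print(schedules.shape)                         # [N, steps, acts]
```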
predict_sequences(current_device, **kwargs)
Given samples from the latent space, return the corresponding decoder space map.

| Parameter | Description |
| --- | --- |
| `z` | [N, latent_dims]. |
| `current_device` | Device to run the model. |

| Returns | Description |
| --- | --- |
| `tensor` | [N, steps, acts]. |
sample(logits)
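The behaviour of `sample(logits)` is not documented on this page. The snippet below shows a typical categorical sampling step for the next activity, as one plausible implementation.

```python
import torch

logits = torch.randn(32, 16, 8)                     # assumed [N, L, Cout] decoder output
probs = torch.softmax(logits[:, -1, :], dim=-1)     # distribution over the next activity
next_act = torch.multinomial(probs, num_samples=1)  # [N, 1] sampled activity indices
```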
validation_step(batch, batch_idx, optimizer_idx=0)
Override the validation step to include the target during validation. This is required for self-attention.
CrossAttentionHead(head_size, n_embd=10, block_size=128, dropout=0.0)
Bases: Module

One head of cross-attention.
Instance attributes:

- dropout = nn.Dropout(dropout)
- key = nn.Linear(n_embd, head_size, bias=False)
- query = nn.Linear(n_embd, head_size, bias=False)
- value = nn.Linear(n_embd, head_size, bias=False)
forward(x_encode, x_decode, mask=None)
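The signature `forward(x_encode, x_decode, mask=None)` suggests queries taken from the decoder stream and keys/values from the encoder stream. The sketch below shows that wiring outside the class; names and shapes are assumptions.

```python
import torch
import torch.nn.functional as F

def cross_attention(x_encode, x_decode, key, query, value, dropout, mask=None):
    k = key(x_encode)                                          # [N, L_enc, head_size]
    v = value(x_encode)                                        # [N, L_enc, head_size]
    q = query(x_decode)                                        # [N, L_dec, head_size]
    weights = q @ k.transpose(-2, -1) * k.shape[-1] ** -0.5    # [N, L_dec, L_enc]
    if mask is not None:
        weights = weights.masked_fill(mask == 0, float("-inf"))
    weights = dropout(F.softmax(weights, dim=-1))
    return weights @ v                                         # [N, L_dec, head_size]
```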
DecoderBlockMAskedSelfAttention(n_embd, n_head, block_size, dropout, ffwd_size=None)
Bases: Module
Instance attributes:

- ffwd = FeedFoward(n_embd=n_embd, ffwd_size=ffwd_size)
- ln1 = nn.RMSNorm(n_embd)
- ln2 = nn.RMSNorm(n_embd)
- self_attention = MultiHeadMaskedAttention(num_heads=n_head, head_size=head_size, n_embd=n_embd, block_size=block_size, dropout=dropout)
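The attributes above (two RMSNorm layers, masked self-attention, feed-forward) are consistent with a standard pre-norm residual block. The wiring below is a sketch of that pattern, not the confirmed source; whether normalisation is applied before or after each sub-layer, and whether the attention accepts a mask keyword, are assumptions.

```python
# Sketch of a pre-norm residual block using the attributes listed above.
def decoder_block_forward(block, x, mask=None):
    x = x + block.self_attention(block.ln1(x), mask=mask)   # masked self-attention sub-layer
    x = x + block.ffwd(block.ln2(x))                         # feed-forward sub-layer
    return x
```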
FeedFoward(n_embd, dropout=0.0, ffwd_size=None)
Bases: Module

A simple linear layer followed by a non-linearity.
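A typical realisation of "a simple linear layer followed by a non-linearity"; the default hidden width of 4 * n_embd and the ReLU choice are assumptions.

```python
import torch.nn as nn

def make_ffwd(n_embd, ffwd_size=None, dropout=0.0):
    ffwd_size = ffwd_size or 4 * n_embd        # assumed default expansion factor
    return nn.Sequential(
        nn.Linear(n_embd, ffwd_size),
        nn.ReLU(),
        nn.Linear(ffwd_size, n_embd),
        nn.Dropout(dropout),
    )
```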
FixedPositionalEncoding(d_model, dropout=0.0, length=144)
Bases: Module
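One plausible realisation of a fixed positional encoding with this signature is the standard sinusoidal table; the sketch below assumes an even d_model and that the table is added to the token embeddings.

```python
import math
import torch

def sinusoidal_table(d_model, length):
    position = torch.arange(length).unsqueeze(1)                   # [length, 1]
    div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
    pe = torch.zeros(length, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    return pe                                                      # [length, d_model]
```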
LearntPositionalEncoding(d_model, dropout=0.0, length=144)
Bases: Module
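A sketch of a learnt positional encoding consistent with the signature above: an embedding table indexed by position, added to the token embeddings and passed through dropout. The class name and exact forward behaviour are assumptions.

```python
import torch
import torch.nn as nn

class LearntPositionsSketch(nn.Module):
    def __init__(self, d_model, dropout=0.0, length=144):
        super().__init__()
        self.table = nn.Embedding(length, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):                                    # x: [N, L, d_model]
        positions = torch.arange(x.size(1), device=x.device)
        return self.dropout(x + self.table(positions))       # broadcast over the batch
```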
MaskedAttentionHead(head_size, n_embd, block_size, dropout=0.0)
Bases: Module

One head of masked self-attention.
Instance attributes:

- dropout = nn.Dropout(dropout)
- key = nn.Linear(n_embd, head_size, bias=False)
- query = nn.Linear(n_embd, head_size, bias=False)
- value = nn.Linear(n_embd, head_size, bias=False)
forward(x, mask=None)
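The difference from the plain AttentionHead above should be the causal mask sized by block_size. The snippet shows how such a lower-triangular mask is typically built and applied; the buffer and mechanics are assumptions.

```python
import torch

block_size, L = 128, 16
tril = torch.tril(torch.ones(block_size, block_size))          # assumed causal mask buffer
scores = torch.randn(4, L, L)                                   # raw attention scores, for illustration
scores = scores.masked_fill(tril[:L, :L] == 0, float("-inf"))
weights = torch.softmax(scores, dim=-1)                         # each step attends only to itself and earlier steps
```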
MultiHeadAttention(num_heads, head_size, n_embd=10, dropout=0.0)
Bases: Module

Multiple heads of self-attention in parallel.
MultiHeadCrossAttention(num_heads, head_size, n_embd=10, dropout=0.0)
Bases: Module

Multiple heads of masked cross-attention in parallel.
MultiHeadMaskedAttention(num_heads, head_size, block_size, n_embd, dropout=0.0)
Bases: Module

Multiple heads of masked self-attention in parallel.
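Typical multi-head wiring for the head classes documented above: run the heads in parallel, concatenate along the feature dimension, then project back to n_embd. The projection layer and output dropout are assumptions about this implementation.

```python
import torch
import torch.nn as nn

class MultiHeadSketch(nn.Module):
    def __init__(self, heads, n_embd, dropout=0.0):
        super().__init__()
        self.heads = nn.ModuleList(heads)                     # e.g. MaskedAttentionHead instances
        head_size = heads[0].key.out_features                 # each head's output width
        self.proj = nn.Linear(len(heads) * head_size, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        out = torch.cat([h(x, mask=mask) for h in self.heads], dim=-1)  # [N, L, num_heads * head_size]
        return self.dropout(self.proj(out))
```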