author     guaneec <guaneec@users.noreply.github.com>  2022-10-26 12:10:30 +0800
committer  GitHub <noreply@github.com>                 2022-10-26 12:10:30 +0800
commit     2f4c91894d4c0a055c1069b2fda0e4da8fcda188
tree       fd364c79fc3be446f28332a38ce08460a495b244
parent     3e15f8e0f5cc87507f77546d92435670644dbd18
Remove activation from final layer of HNs
-rw-r--r--  modules/hypernetworks/hypernetwork.py | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index d647ea55..54346b64 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -41,8 +41,8 @@ class HypernetworkModule(torch.nn.Module):
             # Add a fully-connected layer
             linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
 
-            # Add an activation func
-            if activation_func == "linear" or activation_func is None:
+            # Add an activation func except last layer
+            if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3:
                 pass
             elif activation_func in self.activation_dict:
                 linears.append(self.activation_dict[activation_func]())
@@ -53,7 +53,7 @@ class HypernetworkModule(torch.nn.Module):
             if add_layer_norm:
                 linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
 
-            # Add dropout expect last layer
+            # Add dropout except last layer
             if use_dropout and i < len(layer_structure) - 3:
                 linears.append(torch.nn.Dropout(p=0.3))
 
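
For context, below is a minimal runnable sketch of the layer-construction loop as it behaves after this commit. It is not the repository's HypernetworkModule class: the build_layers helper, the trimmed activation_dict, and the example call are illustrative assumptions; only the three conditions inside the loop mirror the committed lines above.

import torch

# Assumed stand-in for HypernetworkModule.activation_dict (illustrative subset).
activation_dict = {
    "relu": torch.nn.ReLU,
    "leakyrelu": torch.nn.LeakyReLU,
    "elu": torch.nn.ELU,
}

def build_layers(dim, layer_structure, activation_func=None,
                 add_layer_norm=False, use_dropout=False):
    linears = []
    for i in range(len(layer_structure) - 1):
        # Fully-connected layer between consecutive width multipliers.
        linears.append(torch.nn.Linear(int(dim * layer_structure[i]),
                                       int(dim * layer_structure[i + 1])))

        # Activation func, skipped once i >= len(layer_structure) - 3 (the
        # committed condition; with the loop bound above, it leaves the last
        # two Linear layers without an activation, so the output stays linear).
        if (activation_func == "linear" or activation_func is None
                or i >= len(layer_structure) - 3):
            pass
        elif activation_func in activation_dict:
            linears.append(activation_dict[activation_func]())
        else:
            raise RuntimeError(f'unsupported activation function: {activation_func}')

        # Optional layer normalization on the layer's output width.
        if add_layer_norm:
            linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))

        # Dropout, likewise skipped at the tail of the stack.
        if use_dropout and i < len(layer_structure) - 3:
            linears.append(torch.nn.Dropout(p=0.3))

    return torch.nn.Sequential(*linears)

# Example: dim=320 with a 1 -> 2 -> 2 -> 1 structure. Only the first Linear
# is followed by ReLU and Dropout; nothing trails the final Linear.
print(build_layers(320, [1, 2, 2, 1], activation_func="relu", use_dropout=True))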