-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel.py
More file actions
102 lines (83 loc) · 2.52 KB
/
model.py
File metadata and controls
102 lines (83 loc) · 2.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import torch
import torch.nn as nn
class nn_model(nn.Module):
    """Three-hidden-layer feed-forward network producing a single scalar output.

    Architecture: Linear -> BatchNorm -> activation -> Dropout, repeated for
    the first two hidden layers; the third hidden layer omits Dropout, and a
    final Linear maps to one output unit.
    """

    def __init__(self, input_dim, hl1, hl2, hl3, dropout, activ):
        """
        Initiates nn_model instance with given hyperparameters.

        Parameters
        ----------
        input_dim : int
            Size of input data.
        hl1 : int
            Size of first hidden layer.
        hl2 : int
            Size of second hidden layer.
        hl3 : int
            Size of third hidden layer.
        dropout : float
            Proportion of data to drop (ranging from 0.0 to 1.0).
        activ : str
            Name of the activation function: "leaky_relu" or "elu".
            Any other value falls back to ReLU.

        Returns
        -------
        None.
        """
        super(nn_model, self).__init__()
        # Dispatch on the activation name; default is ReLU.
        if activ == "leaky_relu":
            activation_layer = nn.LeakyReLU()
        elif activ == "elu":
            activation_layer = nn.ELU()
        else:
            activation_layer = nn.ReLU()
        # NOTE: the same (stateless) activation module instance is reused at
        # each position, which is safe for ReLU/LeakyReLU/ELU.
        layers = [
            nn.Linear(input_dim, hl1),
            nn.BatchNorm1d(hl1),
            activation_layer,
            nn.Dropout(dropout),
            nn.Linear(hl1, hl2),
            nn.BatchNorm1d(hl2),
            activation_layer,
            nn.Dropout(dropout),
            nn.Linear(hl2, hl3),
            nn.BatchNorm1d(hl3),
            activation_layer,
            nn.Linear(hl3, 1)
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """
        Feeds input data x through the model's layers.

        Parameters
        ----------
        x : torch.Tensor
            Input batch of shape (batch_size, input_dim).

        Returns
        -------
        out : torch.Tensor
            Predictions of shape (batch_size, 1).
        """
        out = self.model(x)
        return out
class job_model(nn.Module):
    """Two-hidden-layer feed-forward network with a single scalar output.

    Architecture: Linear -> activation -> Dropout -> Linear -> activation
    -> Linear(1). Batch normalization is intentionally absent here (unlike
    nn_model).
    """

    # Map activation names to their module classes; unknown names use ReLU.
    _ACTIVATIONS = {"leaky_relu": nn.LeakyReLU, "elu": nn.ELU}

    def __init__(self, input_dim, hl1, hl2, dropout, activ):
        """Build the network from the given hyperparameters.

        Parameters
        ----------
        input_dim : int
            Size of input data.
        hl1 : int
            Size of first hidden layer.
        hl2 : int
            Size of second hidden layer.
        dropout : float
            Dropout probability applied after the first hidden layer.
        activ : str
            Activation name ("leaky_relu" or "elu"); anything else -> ReLU.
        """
        super(job_model, self).__init__()
        # One shared (stateless) activation instance, same as the original.
        act = self._ACTIVATIONS.get(activ, nn.ReLU)()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hl1),
            act,
            nn.Dropout(dropout),
            nn.Linear(hl1, hl2),
            act,
            nn.Linear(hl2, 1),
        )

    def forward(self, x):
        """Run a forward pass; returns predictions of shape (batch, 1)."""
        return self.model(x)