vivek01 committed
Commit 1438e2e · verified · 1 Parent(s): f5e53fc

Upload 2 files

Files changed (2)
  1. handlers.py +144 -0
  2. requirements.txt +3 -0
handlers.py ADDED
@@ -0,0 +1,144 @@
+ import torch
+ import torch.nn as nn
+ from transformers import DebertaV2Tokenizer, DebertaV2Model
+ from typing import Dict, Any
+ import joblib
+ import os
+
+ # Define the EndpointHandler class
+ class EndpointHandler:
+     def __init__(self, model_path: str = ""):
+         # Load the tokenizer from the Hugging Face repository
+         self.tokenizer = DebertaV2Tokenizer.from_pretrained(model_path)
+
+         # Initialize the custom multitask DeBERTa model with pre-defined label counts
+         self.model = MultitaskDebertaModel(num_emotion_labels=8, num_polarity_labels=4, num_hate_speech_labels=2)
+
+         # Load the fine-tuned weights; the plain nn.Module has no from_pretrained(),
+         # so the state dict is loaded directly (the weights filename is assumed here)
+         state_dict = torch.load(os.path.join(model_path, 'pytorch_model.bin'), map_location='cpu')
+         self.model.load_state_dict(state_dict)
+
+         # Use GPU if available
+         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+         self.model.to(self.device)
+         self.model.eval()
+
+         # Load the encoder files directly from the root of the repository
+         self.emotion_encoder = joblib.load(os.path.join(model_path, 'emotion_encoder.pkl'))
+         self.polarity_encoder = joblib.load(os.path.join(model_path, 'polarity_encoder.pkl'))
+         self.hate_speech_encoder = joblib.load(os.path.join(model_path, 'hate_speech_encoder.pkl'))
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         # Preprocess input: extract the text from the request payload
+         text = data.get('inputs')
+
+         # Tokenize the input text
+         inputs = self.tokenizer(text, return_tensors='pt', max_length=256, truncation=True, padding=True)
+         # Drop token_type_ids: the model's forward() only accepts input_ids and attention_mask
+         if 'token_type_ids' in inputs:
+             del inputs['token_type_ids']
+         inputs = {key: val.to(self.device) for key, val in inputs.items()}
+
+         # Run the input through the model
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+             emotion_logits = outputs.get('emotion')
+             polarity_logits = outputs.get('polarity')
+             hate_speech_logits = outputs.get('hate_speech')
+
+         # Decode predictions from logits using argmax and the label encoders
+         emotion_preds = torch.argmax(emotion_logits, dim=1).cpu().numpy()
+         polarity_preds = torch.argmax(polarity_logits, dim=1).cpu().numpy()
+         hate_speech_preds = torch.argmax(hate_speech_logits, dim=1).cpu().numpy()
+
+         # Inverse transform the predictions to get human-readable labels
+         decoded_emotions = self.emotion_encoder.inverse_transform(emotion_preds)
+         decoded_polarities = self.polarity_encoder.inverse_transform(polarity_preds)
+         decoded_hate_speech = self.hate_speech_encoder.inverse_transform(hate_speech_preds)
+
+         # Return plain Python lists so the response is JSON-serializable
+         return {
+             "emotions": decoded_emotions.tolist(),
+             "polarities": decoded_polarities.tolist(),
+             "hate_speech": decoded_hate_speech.tolist()
+         }
+
+ # Define the custom multitask model architecture
+ class MultitaskDebertaModel(nn.Module):
+     def __init__(self, num_emotion_labels, num_polarity_labels, num_hate_speech_labels):
+         super(MultitaskDebertaModel, self).__init__()
+         self.deberta = DebertaV2Model.from_pretrained('microsoft/deberta-v3-base')
+
+         # Freeze the first 5 encoder layers of DeBERTa to speed up training and inference
+         for layer in self.deberta.encoder.layer[:5]:
+             for p in layer.parameters():
+                 p.requires_grad = False
+
+         # LSTM layers for each task
+         self.emotion_lstm = nn.LSTM(768, 128, bidirectional=True, batch_first=True)
+         self.polarity_lstm = nn.LSTM(768, 128, bidirectional=True, batch_first=True)
+         self.hate_speech_lstm = nn.LSTM(768, 128, bidirectional=True, batch_first=True)
+
+         # Attention layers for each task
+         self.emotion_attention = nn.MultiheadAttention(embed_dim=256, num_heads=8, batch_first=True)
+         self.polarity_attention = nn.MultiheadAttention(embed_dim=256, num_heads=8, batch_first=True)
+         self.hate_speech_attention = nn.MultiheadAttention(embed_dim=256, num_heads=8, batch_first=True)
+
+         # Dense layers for each task after attention
+         self.emotion_dense = nn.Linear(256, 128)
+         self.polarity_dense = nn.Linear(256, 128)
+         self.hate_speech_dense = nn.Linear(256, 128)
+
+         # Fusion layer that combines the task-specific features and the CLS token
+         self.fusion_dense = nn.Linear(128 + 128 + 128 + 768, 128)
+
+         # Task-specific classifiers
+         self.emotion_classifier = nn.Linear(128, num_emotion_labels)
+         self.polarity_classifier = nn.Linear(128, num_polarity_labels)
+         self.hate_speech_classifier = nn.Linear(128, num_hate_speech_labels)
+
+         # Regularization layers: layer normalization and dropout
+         self.layer_norm = nn.LayerNorm(128)
+         self.dropout = nn.Dropout(p=0.3)
+         self.relu = nn.ReLU()
+
+     def forward(self, input_ids, attention_mask):
+         # Extract DeBERTa outputs
+         deberta_outputs = self.deberta(input_ids, attention_mask=attention_mask)
+         sequence_output = deberta_outputs.last_hidden_state
+         cls_output = sequence_output[:, 0, :]  # CLS token output
+
+         # Task-specific LSTM outputs
+         emotion_lstm_output, _ = self.emotion_lstm(sequence_output)
+         polarity_lstm_output, _ = self.polarity_lstm(sequence_output)
+         hate_speech_lstm_output, _ = self.hate_speech_lstm(sequence_output)
+
+         # Task-specific attention outputs
+         emotion_attention_output, _ = self.emotion_attention(emotion_lstm_output, emotion_lstm_output, emotion_lstm_output)
+         polarity_attention_output, _ = self.polarity_attention(polarity_lstm_output, polarity_lstm_output, polarity_lstm_output)
+         hate_speech_attention_output, _ = self.hate_speech_attention(hate_speech_lstm_output, hate_speech_lstm_output, hate_speech_lstm_output)
+
+         # Pool the attention outputs over the sequence dimension
+         emotion_features = torch.mean(emotion_attention_output, dim=1)
+         polarity_features = torch.mean(polarity_attention_output, dim=1)
+         hate_speech_features = torch.mean(hate_speech_attention_output, dim=1)
+
+         # Apply dense layers after pooling
+         emotion_features = self.relu(self.emotion_dense(emotion_features))
+         polarity_features = self.relu(self.polarity_dense(polarity_features))
+         hate_speech_features = self.relu(self.hate_speech_dense(hate_speech_features))
+
+         # Combine all features (task-specific features + CLS token)
+         combined_features = torch.cat([emotion_features, polarity_features, hate_speech_features, cls_output], dim=-1)
+         combined_features = self.relu(self.fusion_dense(combined_features))
+
+         # Apply layer normalization and dropout
+         combined_features = self.layer_norm(combined_features)
+         combined_features = self.dropout(combined_features)
+
+         # Task-specific logits
+         emotion_logits = self.emotion_classifier(combined_features)
+         polarity_logits = self.polarity_classifier(combined_features)
+         hate_speech_logits = self.hate_speech_classifier(combined_features)
+
+         return {
+             'emotion': emotion_logits,
+             'polarity': polarity_logits,
+             'hate_speech': hate_speech_logits
+         }
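For a quick local check of the handler above, outside a deployed endpoint, a sketch like the following can be used. It assumes the model repository, including the fine-tuned weights file and the three *_encoder.pkl label encoders, has been downloaded locally; the ./model path and the sample sentence are illustrative, not part of the commit.

# Minimal local smoke test for handlers.py (illustrative; paths are placeholders).
from handlers import EndpointHandler

handler = EndpointHandler(model_path="./model")
result = handler({"inputs": "The support team resolved my issue quickly, thank you!"})
print(result)
# Expected response shape (actual labels depend on the trained encoders):
# {"emotions": [...], "polarities": [...], "hate_speech": [...]}

The payload key "inputs" and the three response keys match what __call__ reads and returns above.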
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+ joblib
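Note on dependencies: requirements.txt lists only torch, transformers and joblib. Based on the code in handlers.py, two further packages are likely needed at runtime unless they are already present in the endpoint image (this is an assumption, not part of the commit): DebertaV2Tokenizer is a sentencepiece-based tokenizer, and if the *_encoder.pkl files are scikit-learn LabelEncoders (they are used via inverse_transform), scikit-learn is required to unpickle them. A fuller sketch might look like:

torch
transformers
sentencepiece   # assumed: needed by DebertaV2Tokenizer
joblib
scikit-learn    # assumed: needed to unpickle the label encoders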