diff --git a/label_studio_ml/examples/bert_classifier/model.py b/label_studio_ml/examples/bert_classifier/model.py
index d5bc5f6e..f0c54a91 100644
--- a/label_studio_ml/examples/bert_classifier/model.py
+++ b/label_studio_ml/examples/bert_classifier/model.py
@@ -122,7 +122,8 @@ def fit(self, event, data, **additional_params):
         if event not in ('ANNOTATION_CREATED', 'ANNOTATION_UPDATED', 'START_TRAINING'):
             logger.info(f"Skip training: event {event} is not supported")
             return
-        project_id = data['annotation']['project']
+        logger.debug(f"Project details payload for training: {data}")
+        project_id = data['project']['id']
 
         # dowload annotated tasks from Label Studio
         ls = label_studio_sdk.Client(self.LABEL_STUDIO_HOST, self.LABEL_STUDIO_API_KEY)
diff --git a/label_studio_ml/examples/huggingface_ner/model.py b/label_studio_ml/examples/huggingface_ner/model.py
index 5e89f8a8..74aee430 100644
--- a/label_studio_ml/examples/huggingface_ner/model.py
+++ b/label_studio_ml/examples/huggingface_ner/model.py
@@ -52,7 +52,7 @@ def get_labels(self):
         from_name, _, _ = li.get_first_tag_occurence('Labels', 'Text')
         tag = li.get_tag(from_name)
         return tag.labels
-    
+
     def setup(self):
         """Configure any paramaters of your model here
         """
@@ -102,7 +102,7 @@ def predict(self, tasks: List[Dict], context: Optional[Dict] = None, **kwargs) -
                 'score': avg_score / len(results),
                 'model_version': self.get('model_version')
             })
-        
+
         return ModelResponse(predictions=predictions, model_version=self.get('model_version'))
 
     def _get_tasks(self, project_id):
@@ -135,7 +135,7 @@ def tokenize_and_align_labels(self, examples, tokenizer):
 
         tokenized_inputs["labels"] = labels
         return tokenized_inputs
-    
+
     def fit(self, event, data, **kwargs):
         """Download dataset from Label Studio and prepare data for training in BERT
         """
@@ -143,7 +143,8 @@ def fit(self, event, data, **kwargs):
             logger.info(f"Skip training: event {event} is not supported")
             return
 
-        project_id = data['annotation']['project']
+        logger.debug(f"Project details payload for training: {data}")
+        project_id = data['project']['id']
         tasks = self._get_tasks(project_id)
 
         if len(tasks) % self.START_TRAINING_EACH_N_UPDATES != 0 and event != 'START_TRAINING':
diff --git a/label_studio_ml/examples/sklearn_text_classifier/model.py b/label_studio_ml/examples/sklearn_text_classifier/model.py
index 78adbac6..c162d350 100644
--- a/label_studio_ml/examples/sklearn_text_classifier/model.py
+++ b/label_studio_ml/examples/sklearn_text_classifier/model.py
@@ -74,7 +74,7 @@ def get_label_studio_parameters(self) -> Dict:
             'value': value,
             'labels': labels
         }
-    
+
     def predict(self, tasks: List[Dict], context: Optional[Dict] = None, **kwargs) -> ModelResponse:
         """
         This method is used to predict the labels for a given list of tasks.
@@ -162,7 +162,8 @@ def fit(self, event, data, **kwargs):
             logger.info(f"Skip training: event {event} is not supported")
             return
 
-        project_id = data['annotation']['project']
+        logger.debug(f"Project details payload for training: {data}")
+        project_id = data['project']['id']
         tasks = self._get_tasks(project_id)
 
         # Get the labeling configuration parameters like labels and input / output annotation format names