5 changes: 5 additions & 0 deletions Appraise/urls.py
@@ -192,6 +192,11 @@
campaign_views.campaign_status,
name='campaign_status',
),
re_path(
r'^pairwise-assessment-document-esa/$',
evalview_views.pairwise_assessment_document_esa,
name="pairwise-assessment-document-esa",
),
]

if DEBUG:
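The new route follows the naming convention of the existing pairwise document task and points at evalview_views.pairwise_assessment_document_esa. A minimal sketch of resolving the route by name (standard Django URL reversing; the resulting path assumes no extra prefix in the project URL configuration):

    from django.urls import reverse

    # Resolves the named route added above; with the default project URL conf
    # this yields '/pairwise-assessment-document-esa/'.
    url = reverse('pairwise-assessment-document-esa')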
28 changes: 28 additions & 0 deletions Campaign/management/commands/MakeAnnotation.py
@@ -232,6 +232,34 @@ def handle(self, *args, **options):
mqms[0],
username,
)

##################################################################
elif campaign_type == "PairwiseDocumentESA":
if len(scores) != 2:
raise ValueError('Task "PairwiseDocumentESA" requires exactly 2 scores')

data = {
"score1": scores[0],
"score2": scores[1],
"mqm1": mqms[0],
"mqm2": mqms[1],
"item_id": response.context["item_id"],
"task_id": response.context["task_id"],
"document_id": response.context["document_id"],
"start_timestamp": (datetime.now() - timedelta(minutes=5)).timestamp(),
"end_timestamp": datetime.now().timestamp(),
}

msg_info = "item {}/{}/{} with score(s) {}, {} and mqm(s) {}, {} for user {}".format(
response.context["item_id"],
response.context["task_id"],
response.context["document_id"],
scores[0],
scores[1],
mqms[0],
mqms[1],
username,
)

##################################################################
elif campaign_type == "Data":
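The mqm1/mqm2 values posted by this branch are JSON strings (the result model below documents them as "MQM JSON string"). A hedged sketch of building one such value; the keys inside each span object are illustrative assumptions, since the exact ESA span schema is not part of this diff:

    import json

    # Hypothetical error-span list for one target; the key names are
    # assumptions, only "JSON string, default '[]'" comes from the models.
    example_spans = [{"start_i": 4, "end_i": 12, "severity": "minor"}]
    mqm_value = json.dumps(example_spans)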
3 changes: 3 additions & 0 deletions Campaign/views.py
@@ -20,6 +20,7 @@
from EvalData.models import DataAssessmentResult
from EvalData.models import DirectAssessmentDocumentResult
from EvalData.models import PairwiseAssessmentDocumentResult
from EvalData.models import PairwiseAssessmentDocumentESAResult
from EvalData.models import PairwiseAssessmentResult
from EvalData.models import seconds_to_timedelta
from EvalData.models import TASK_DEFINITIONS
@@ -77,12 +78,14 @@ def campaign_status(request, campaign_name, sort_key=2):
if (
result_type is DirectAssessmentDocumentResult
or result_type is PairwiseAssessmentDocumentResult
or result_type is PairwiseAssessmentDocumentESAResult
):
_data = _data.exclude(item__isCompleteDocument=True)
# Contrastive tasks use different field names for target segments/scores
if (
result_type is PairwiseAssessmentResult
or result_type is PairwiseAssessmentDocumentResult
or result_type is PairwiseAssessmentDocumentESAResult
):
_data = _data.values_list(
'start_time',
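campaign_status now treats ESA results like the other contrastive types: whole-document placeholder items are excluded, and two scores are read per item. The selected field list is truncated in this hunk, so the following ORM sketch is an assumption based on the score1/score2 fields defined in the migration below:

    from EvalData.models import PairwiseAssessmentDocumentESAResult

    # Assumed field selection for contrastive results (two scores per item).
    rows = PairwiseAssessmentDocumentESAResult.objects.filter(
        completed=True
    ).values_list('start_time', 'end_time', 'score1', 'score2')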
2 changes: 2 additions & 0 deletions Dashboard/utils.py
@@ -19,6 +19,7 @@
from EvalData.models import DirectAssessmentResult
from EvalData.models import MultiModalAssessmentResult
from EvalData.models import PairwiseAssessmentDocumentResult
from EvalData.models import PairwiseAssessmentDocumentESAResult
from EvalData.models import PairwiseAssessmentResult
from EvalData.models import RESULT_TYPES

@@ -75,6 +76,7 @@ def run_quality_control(username):
if (
result_type is PairwiseAssessmentResult
or result_type is PairwiseAssessmentDocumentResult
or result_type is PairwiseAssessmentDocumentESAResult
):
_data = _data.values_list(
'start_time',
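run_quality_control gets the same contrastive branch. As an illustration of what can be derived from the selected start_time/end_time floats (both stored in seconds), a hedged helper that is not taken from this diff:

    def average_annotation_seconds(times):
        """Illustrative helper (not part of this diff): `times` is an iterable
        of (start_time, end_time) float pairs in seconds."""
        durations = [end - start for start, end in times]
        return sum(durations) / len(durations) if durations else 0.0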
@@ -0,0 +1,91 @@
# Generated by Django 4.1 on 2024-07-25 07:29

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Campaign', '0015_alter_campaign_activatedby_alter_campaign_batches_and_more'),
('EvalData', '0054_alter_dataassessmentresult_activatedby_and_more'),
]

operations = [
migrations.AddField(
model_name='textsegmentwithtwotargets',
name='mqm1',
field=models.TextField(blank=True, default='[]', verbose_name='MQM Annotations (1)'),
),
migrations.AddField(
model_name='textsegmentwithtwotargets',
name='mqm2',
field=models.TextField(blank=True, default='[]', verbose_name='MQM Annotations (2)'),
),
migrations.CreateModel(
name='PairwiseAssessmentDocumentESATask',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?')),
('completed', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?')),
('retired', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('_str_name', models.TextField(blank=True, default='', editable=False)),
('requiredAnnotations', models.PositiveSmallIntegerField(help_text='(value in range=[1,50])', verbose_name='Required annotations')),
('batchNo', models.PositiveIntegerField(help_text='(1-based)', verbose_name='Batch number')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_activated_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('assignedTo', models.ManyToManyField(blank=True, db_index=True, help_text='(users working on this task)', related_name='%(app_label)s_%(class)s_assignedTo', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Assigned to')),
('batchData', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_batchData', related_query_name='%(app_label)s_%(class)ss', to='Campaign.campaigndata', verbose_name='Batch data')),
('campaign', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_campaign', related_query_name='%(app_label)s_%(class)ss', to='Campaign.campaign', verbose_name='Campaign')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_completed_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_created_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('items', models.ManyToManyField(related_name='%(app_label)s_%(class)s_items', related_query_name='%(app_label)s_%(class)ss', to='EvalData.textsegmentwithtwotargetswithcontext', verbose_name='Items')),
('modifiedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_modified_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Modified by')),
('retiredBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_retired_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Retired by')),
],
options={
'ordering': ['_str_name'],
'abstract': False,
},
),
migrations.CreateModel(
name='PairwiseAssessmentDocumentESAResult',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?')),
('completed', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?')),
('retired', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('_str_name', models.TextField(blank=True, default='', editable=False)),
('score1', models.PositiveSmallIntegerField(help_text='(value in range=[1,100])', verbose_name='Score (1)')),
('score2', models.PositiveSmallIntegerField(blank=True, help_text='(value in range=[1,100])', null=True, verbose_name='Score (2)')),
('start_time', models.FloatField(help_text='(in seconds)', verbose_name='Start time')),
('end_time', models.FloatField(help_text='(in seconds)', verbose_name='End time')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_activated_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_completed_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_created_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_item', related_query_name='%(app_label)s_%(class)ss', to='EvalData.textsegmentwithtwotargetswithcontext', verbose_name='Item')),
('modifiedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_modified_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Modified by')),
('retiredBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_retired_by', related_query_name='%(app_label)s_%(class)ss', to=settings.AUTH_USER_MODEL, verbose_name='Retired by')),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='%(app_label)s_%(class)s_task', related_query_name='%(app_label)s_%(class)ss', to='EvalData.pairwiseassessmentdocumentesatask', verbose_name='Task')),
('mqm1', models.TextField(default='[]', help_text='MQM JSON string', verbose_name='MQM (1)')),
('mqm2', models.TextField(default='[]', help_text='MQM JSON string', verbose_name='MQM (2)')),
],
options={
'ordering': ['_str_name'],
'abstract': False,
},
),
]
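The migration adds the two MQM columns to textsegmentwithtwotargets and creates the ESA task and result tables. Applying it follows the usual Django workflow; a minimal sketch, assuming a configured settings module (the migration filename itself is not shown in this diff):

    from django.core.management import call_command

    # Equivalent to `python manage.py migrate EvalData`; applies this migration
    # together with any other pending EvalData migrations.
    call_command('migrate', 'EvalData')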
9 changes: 9 additions & 0 deletions EvalData/models/__init__.py
@@ -69,6 +69,15 @@
'evaldata_pairwiseassessmentdocumenttasks',
'evaldata_pairwiseassessmentdocumentresults',
),
(
'PairwiseDocumentESA',
PairwiseAssessmentDocumentESATask,
PairwiseAssessmentDocumentESAResult,
'pairwise-assessment-document-esa',
TextSegmentWithTwoTargetsWithContext,
'evaldata_pairwiseassessmentdocumentesatasks',
'evaldata_pairwiseassessmentdocumentesaresults',
),
(
'Data',
DataAssessmentTask,
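The new TASK_DEFINITIONS entry mirrors the neighbouring ones: campaign type name, task class, result class, URL name, item class, and the two table names. A hedged sketch of looking the entry up by its type name; the positional meaning of the tuple is inferred from the surrounding entries rather than stated in this diff:

    from EvalData.models import TASK_DEFINITIONS

    # Positions inferred from the neighbouring entries in TASK_DEFINITIONS.
    esa_entry = next(t for t in TASK_DEFINITIONS if t[0] == 'PairwiseDocumentESA')
    task_cls, result_cls, url_name = esa_entry[1], esa_entry[2], esa_entry[3]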
15 changes: 15 additions & 0 deletions EvalData/models/base_models.py
@@ -98,6 +98,7 @@ def get_object_instance(self):
DirectAssessmentDocumentTask,
MultiModalAssessmentTask,
PairwiseAssessmentDocumentTask,
PairwiseAssessmentDocumentESATask,
PairwiseAssessmentTask,
)

@@ -722,6 +723,20 @@ class TextSegmentWithTwoTargets(TextSegment):
blank=True, null=True, verbose_name=_('Target context (2)')
)

# used for error span annotations
mqm1 = models.TextField(
blank=True,
verbose_name=_('MQM Annotations (1)'),
default="[]",
)

# used for error span annotations
mqm2 = models.TextField(
blank=True,
verbose_name=_('MQM Annotations (2)'),
default="[]",
)

def has_context(self):
"""Checks if the current segment has context provided."""
return self.contextLeft or self.contextRight
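Both new fields default to an empty JSON list, so existing TextSegmentWithTwoTargets rows stay valid without a data migration. A minimal sketch of decoding them, assuming the stored strings are JSON as the result model's help text suggests:

    import json

    def parse_mqm(segment):
        """Hedged helper (not part of this diff): decode the per-target
        error-span annotations stored as JSON text on a
        TextSegmentWithTwoTargets."""
        return json.loads(segment.mqm1 or '[]'), json.loads(segment.mqm2 or '[]')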