diff --git a/corehq/apps/accounting/bootstrap/utils.py b/corehq/apps/accounting/bootstrap/utils.py
index 3bea07c4f521..64e56f64a361 100644
--- a/corehq/apps/accounting/bootstrap/utils.py
+++ b/corehq/apps/accounting/bootstrap/utils.py
@@ -171,8 +171,9 @@ def _ensure_software_plan(plan_key, product, product_rate, verbose, apps):
     plan_opts = {
         'name': plan_name,
         'edition': plan_key.edition,
-        'visibility': (SoftwarePlanVisibility.ANNUAL
-                       if plan_key.is_annual_plan else SoftwarePlanVisibility.PUBLIC),
+        'visibility': (SoftwarePlanVisibility.INTERNAL
+                       if plan_key.edition == SoftwarePlanEdition.ENTERPRISE
+                       else SoftwarePlanVisibility.PUBLIC),
     }
     if plan_key.is_annual_plan is not None:
         plan_opts['is_annual_plan'] = plan_key.is_annual_plan
diff --git a/corehq/apps/accounting/migrations/0095_update_softwareplan_visibilities.py b/corehq/apps/accounting/migrations/0095_update_softwareplan_visibilities.py
new file mode 100644
index 000000000000..c05fdc5c6938
--- /dev/null
+++ b/corehq/apps/accounting/migrations/0095_update_softwareplan_visibilities.py
@@ -0,0 +1,44 @@
+from datetime import datetime
+
+from django.db import migrations, models
+
+from corehq.apps.accounting.models import SoftwarePlanVisibility
+
+ANNUAL = "ANNUAL"
+
+
+def change_plan_visibilities(apps, schema_editor):
+    # one-time cleanup of existing software plans
+    SoftwarePlan = apps.get_model('accounting', 'SoftwarePlan')
+
+    enterprise_names = ["Dimagi Only CommCare Enterprise Edition"]
+    enterprise_plans = SoftwarePlan.objects.filter(name__in=enterprise_names)
+    enterprise_plans.update(visibility=SoftwarePlanVisibility.INTERNAL, last_modified=datetime.now())
+
+    annual_plans = SoftwarePlan.objects.filter(visibility=ANNUAL)
+    annual_plans.update(visibility=SoftwarePlanVisibility.PUBLIC, last_modified=datetime.now())
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("accounting", "0094_add_annual_softwareplans"),
+    ]
+
+    operations = [
+        migrations.RunPython(change_plan_visibilities),
+        migrations.AlterField(
+            model_name="softwareplan",
+            name="visibility",
+            field=models.CharField(
+                choices=[
+                    ("PUBLIC", "PUBLIC - Anyone can subscribe"),
+                    ("INTERNAL", "INTERNAL - Dimagi must create subscription"),
+                    ("TRIAL", "TRIAL- This is a Trial Plan"),
+                    ("ARCHIVED", "ARCHIVED - hidden from subscription change forms"),
+                ],
+                default="INTERNAL",
+                max_length=10,
+            ),
+        ),
+    ]
diff --git a/corehq/apps/accounting/models.py b/corehq/apps/accounting/models.py
index 131b454eb6e6..df5296bd3d0d 100644
--- a/corehq/apps/accounting/models.py
+++ b/corehq/apps/accounting/models.py
@@ -171,14 +171,12 @@ class SoftwarePlanVisibility(object):
     PUBLIC = "PUBLIC"
     INTERNAL = "INTERNAL"
    TRIAL = "TRIAL"
-    ANNUAL = "ANNUAL"
     ARCHIVED = "ARCHIVED"
     CHOICES = (
         (PUBLIC, "PUBLIC - Anyone can subscribe"),
         (INTERNAL, "INTERNAL - Dimagi must create subscription"),
         (TRIAL, "TRIAL- This is a Trial Plan"),
         (ARCHIVED, "ARCHIVED - hidden from subscription change forms"),
-        (ANNUAL, "ANNUAL - public plans that on annual pricing"),
     )
diff --git a/corehq/apps/accounting/tests/test_ensure_plans.py b/corehq/apps/accounting/tests/test_ensure_plans.py
index d1af692a00ab..2ceef0f6d4ad 100644
--- a/corehq/apps/accounting/tests/test_ensure_plans.py
+++ b/corehq/apps/accounting/tests/test_ensure_plans.py
@@ -104,7 +104,8 @@ def _test_plan_versions_ensured(self, bootstrap_config):
         )
         self.assertEqual(sms_feature_rate.per_excess_fee, 0)
 
-        expected_visibility = (SoftwarePlanVisibility.ANNUAL
-                               if is_annual_plan else SoftwarePlanVisibility.PUBLIC)
+        expected_visibility = (SoftwarePlanVisibility.INTERNAL
+                               if edition == SoftwarePlanEdition.ENTERPRISE
+                               else SoftwarePlanVisibility.PUBLIC)
         self.assertEqual(software_plan_version.plan.visibility, expected_visibility)
         self.assertEqual(software_plan_version.plan.is_annual_plan, is_annual_plan)
diff --git a/corehq/apps/api/resources/__init__.py b/corehq/apps/api/resources/__init__.py
index 65ce4e44a099..2e1e1d2b0e4a 100644
--- a/corehq/apps/api/resources/__init__.py
+++ b/corehq/apps/api/resources/__init__.py
@@ -1,5 +1,6 @@
 import json
 
+from django.core.exceptions import ValidationError
 from django.http import HttpResponse
 from django.urls import NoReverseMatch
 
@@ -114,6 +115,22 @@ def dispatch(self, request_type, request, **kwargs):
                 content_type="application/json",
                 status=401))
 
+    def alter_deserialized_detail_data(self, request, data):
+        """Provide a hook for data validation
+
+        Subclasses may implement ``validate_deserialized_data`` that
+        raises ``django.core.exceptions.ValidationError`` if the submitted
+        data is not valid. This is designed to work conveniently with
+        ``corehq.util.validation.JSONSchemaValidator``.
+        """
+        data = super().alter_deserialized_detail_data(request, data)
+        if hasattr(self, "validate_deserialized_data"):
+            try:
+                self.validate_deserialized_data(data)
+            except ValidationError as error:
+                raise ImmediateHttpResponse(self.error_response(request, error.messages))
+        return data
+
     def get_required_privilege(self):
         return privileges.API_ACCESS
 
diff --git a/corehq/apps/api/tests/lookup_table_resources.py b/corehq/apps/api/tests/lookup_table_resources.py
index 2a36032d3e1f..11a97ddfb3c2 100644
--- a/corehq/apps/api/tests/lookup_table_resources.py
+++ b/corehq/apps/api/tests/lookup_table_resources.py
@@ -237,6 +237,35 @@ def test_update(self):
         self.assertEqual(data_type.fields[0].properties, ['lang', 'name'])
         self.assertEqual(data_type.item_attributes, ['X'])
 
+    def test_update_field_name(self):
+        lookup_table = {
+            "fields": [{"name": "property", "properties": ["value"]}],
+            "tag": "lookup_table",
+        }
+
+        response = self._assert_auth_post_resource(
+            self.single_endpoint(self.data_type.id), json.dumps(lookup_table), method="PUT")
+        print(response.content)  # for debugging errors
+        data_type = LookupTable.objects.get(id=self.data_type.id)
+        self.assertEqual(data_type.fields[0].field_name, 'property')
+
+    def test_update_fails_with_two_field_names(self):
+        lookup_table = {
+            "fields": [{"name": "property", "field_name": "prop"}],
+            "tag": "lookup_table",
+        }
+
+        response = self._assert_auth_post_resource(
+            self.single_endpoint(self.data_type.id), json.dumps(lookup_table), method="PUT")
+        self.assertEqual(response.status_code, 400)
+        errors = json.loads(response.content.decode("utf-8"))
+        print(errors)
+        self.assertIn("Failed validating 'not' in schema", errors[0])
+        self.assertIn("{'not': {'required': ['field_name']}}", errors[0])
+        self.assertIn("Failed validating 'not' in schema", errors[1])
+        self.assertIn("{'not': {'required': ['name']}}", errors[1])
+        self.assertEqual(len(errors), 2)
+
 
 class TestLookupTableItemResourceV06(APIResourceTest):
     resource = LookupTableItemResource
@@ -328,6 +357,58 @@ def test_update(self):
             'cool_attr_value'
         )
 
+    def test_create_with_bad_properties(self):
+        data_item_json = self._get_data_item_create()
+        data_item_json["fields"]["state_name"]["field_list"][0]["properties"] = []
+        response = self._assert_auth_post_resource(
+            self.list_endpoint,
+            json.dumps(data_item_json),
+            content_type='application/json',
+        )
+        self.assertEqual(response.status_code, 400)
+        errors = json.loads(response.content.decode("utf-8"))
+        print(errors)
+        self.assertIn("[] is not of type 'object':", errors[0])
+        data_item = LookupTableRow.objects.filter(domain=self.domain.name).first()
+        self.assertIsNone(data_item)
+
+    def test_update_field_value(self):
+        data_item = self._create_data_item()
+        data_item_update = self._get_data_item_update()
+        data_item_update["fields"]["state_name"]["field_list"][0] = {
+            "value": "Mass.",
+            "properties": {"lang": "en"},
+        }
+        response = self._assert_auth_post_resource(
+            self.single_endpoint(data_item.id.hex),
+            json.dumps(data_item_update),
+            method="PUT",
+        )
+        print(response.content)  # for debugging errors
+        row = LookupTableRow.objects.filter(domain=self.domain.name).first()
+        self.assertEqual(row.fields["state_name"][0].value, 'Mass.')
+
+    def test_update_fails_with_two_field_values(self):
+        data_item = self._create_data_item()
+        data_item_update = self._get_data_item_update()
+        data_item_update["fields"]["state_name"]["field_list"][0] = {
+            "value": "Mass.",
+            "field_value": "Mass...",
+        }
+        response = self._assert_auth_post_resource(
+            self.single_endpoint(data_item.id.hex),
+            json.dumps(data_item_update),
+            method="PUT",
+        )
+        self.assertEqual(response.status_code, 400)
+        errors = json.loads(response.content.decode("utf-8"))
+        print(errors)
+        self.assertIn("Failed validating 'not' in schema", errors[0])
+        self.assertIn("{'not': {'required': ['field_value']}}", errors[0])
+        self.assertIn("Failed validating 'not' in schema", errors[1])
+        self.assertIn("{'not': {'required': ['value']}}", errors[1])
+        self.assertEqual(len(errors), 2)
+
 
 class TestLookupTableItemResourceV05(TestLookupTableItemResourceV06):
     resource = LookupTableItemResource
diff --git a/corehq/apps/app_execution/db_accessors.py b/corehq/apps/app_execution/db_accessors.py
index de9ea05a9a5b..b23e281868ee 100644
--- a/corehq/apps/app_execution/db_accessors.py
+++ b/corehq/apps/app_execution/db_accessors.py
@@ -1,4 +1,7 @@
-from django.db.models import Avg, DateTimeField, DurationField, ExpressionWrapper, F, Max
+from collections import defaultdict
+from datetime import timedelta
+
+from django.db.models import Avg, Count, DateTimeField, DurationField, ExpressionWrapper, F, Max
 from django.db.models.functions import Trunc
 
 from corehq.apps.app_execution.models import AppExecutionLog, AppWorkflowConfig
@@ -16,28 +19,77 @@ def get_avg_duration_data(domain, start, end, workflow_id=None):
         ).values("date", "workflow_id")
         .annotate(avg_duration=Avg('duration'))
         .annotate(max_duration=Max('duration'))
-        .order_by("workflow_id", "date")
     )
 
-    data = []
-    seen_workflows = set()
+    data = defaultdict(list)
+    seen_dates = defaultdict(set)
     for row in chart_logs:
-        if row["workflow_id"] not in seen_workflows:
-            seen_workflows.add(row["workflow_id"])
-            data.append({
-                "key": row["workflow_id"],
-                "values": []
-            })
-        data[-1]["values"].append({
+        data[row["workflow_id"]].append({
             "date": row["date"].isoformat(),
             "avg_duration": row["avg_duration"].total_seconds(),
             "max_duration": row["max_duration"].total_seconds(),
         })
+        seen_dates[row["workflow_id"]].add(row["date"])
+
+    start = start.replace(minute=0, second=0, microsecond=0)
+    current = start
+    while current < end:
+        for workflow_id, dates in seen_dates.items():
+            if current not in dates:
+                data[workflow_id].append({"date": current.isoformat(), "avg_duration": None, "max_duration": None})
+        current += timedelta(hours=1)
 
     workflow_names = {
         workflow["id"]: workflow["name"]
-        for workflow in AppWorkflowConfig.objects.filter(id__in=seen_workflows).values("id", "name")
+        for workflow in AppWorkflowConfig.objects.filter(id__in=list(data)).values("id", "name")
     }
-    for workflow_data in data:
-        workflow_data["label"] = workflow_names[workflow_data["key"]]
-    return data
+    return [
+        {
+            "key": workflow_id,
+            "label": workflow_names[workflow_id],
+            "values": sorted(data, key=lambda x: x["date"])
+        }
+        for workflow_id, data in data.items()
+    ]
+
+
+def get_status_data(domain, start, end, workflow_id=None):
+    query = AppExecutionLog.objects.filter(workflow__domain=domain, started__gte=start, started__lt=end)
+    if workflow_id:
+        query = query.filter(workflow_id=workflow_id)
+
+    chart_logs = (
+        query.annotate(date=Trunc("started", "hour", output_field=DateTimeField()))
+        .values("date", "success")
+        .annotate(count=Count("success"))
+    )
+
+    success = []
+    error = []
+    seen_success_dates = set()
+    seen_error_dates = set()
+    for row in chart_logs:
+        item = {
+            "date": row["date"].isoformat(),
+            "count": row["count"],
+        }
+        if row["success"]:
+            success.append(item)
+            seen_success_dates.add(row["date"])
+        else:
+            error.append(item)
+            seen_error_dates.add(row["date"])
+
+    start = start.replace(minute=0, second=0, microsecond=0)
+    current = start
+    while current < end:
+        if current not in seen_error_dates:
+            error.append({"date": current.isoformat(), "count": 0})
+        if current not in seen_success_dates:
+            success.append({"date": current.isoformat(), "count": 0})
+        current += timedelta(hours=1)
+
+    return [
+        {"key": "Success", "values": sorted(success, key=lambda x: x["date"])},
+        {"key": "Error", "values": sorted(error, key=lambda x: x["date"])},
+    ]
diff --git a/corehq/apps/app_execution/forms.py b/corehq/apps/app_execution/forms.py
index 84bfc94258d2..4d5e7c491682 100644
--- a/corehq/apps/app_execution/forms.py
+++ b/corehq/apps/app_execution/forms.py
@@ -13,10 +13,10 @@ class AppWorkflowConfigForm(forms.ModelForm):
-    run_every = forms.IntegerField(min_value=1, required=False, label="Run Every (minutes)")
-    username = forms.CharField(max_length=255, label="Username",
-                               help_text="Username of the user to run the workflow")
-    har_file = forms.FileField(label="HAR File", required=False)
+    run_every = forms.IntegerField(min_value=1, required=False, label=_("Run Every (minutes)"))
+    username = forms.CharField(max_length=255, label=_("Username"),
+                               help_text=_("Username of the user to run the workflow"))
+    har_file = forms.FileField(label=_("HAR File"), required=False)
 
     class Meta:
         model = AppWorkflowConfig
@@ -52,6 +52,7 @@ def __init__(self, request, *args, **kwargs):
         if request.user.is_superuser:
             fields += ["run_every", "notification_emails"]
 
+        har_help = _("HAR file recording should start with the selection of the app (navigate_menu_start).")
         self.helper.layout = crispy.Layout(
             crispy.Div(
                 crispy.Div(
@@ -59,23 +60,22 @@ def __init__(self, request, *args, **kwargs):
                     css_class="col",
                 ),
                 crispy.Div(
-                    crispy.HTML("<p>HAR file recording should start with the "
-                                "selection of the app (navigate_menu_start).</p>"),
"), + crispy.HTML(f"{har_help}
"), "har_file", twbscrispy.StrictButton( - "Populate workflow from HAR file", + _("Populate workflow from HAR file"), type='submit', css_class='btn-secondary', name="import_har", value="1", formnovalidate=True, ), crispy.HTML(""), - crispy.HTML("
-                    crispy.HTML("<h3>Workflow:</h3>"),
"), + crispy.HTML(f"{_('Workflow:')}
"), InlineField("workflow"), css_class="col" ), css_class="row mb-3" ), hqcrispy.FormActions( - twbscrispy.StrictButton("Save", type='submit', css_class='btn-primary') + twbscrispy.StrictButton(_("Save"), type='submit', css_class='btn-primary') ), ) @@ -98,7 +98,9 @@ def clean_app_id(self): try: get_brief_app(domain, app_id) except NoResultFound: - raise forms.ValidationError(f"App not found in domain: {domain}:{app_id}") + raise forms.ValidationError(_("App not found in domain: {domain}:{app_id}").format( + domain=domain, app_id=app_id + )) return app_id diff --git a/corehq/apps/app_execution/models.py b/corehq/apps/app_execution/models.py index 12f716cae364..0a75457267b9 100644 --- a/corehq/apps/app_execution/models.py +++ b/corehq/apps/app_execution/models.py @@ -11,6 +11,7 @@ from corehq.apps.app_manager.dbaccessors import get_brief_app from corehq.sql_db.functions import MakeInterval from corehq.util.jsonattrs import AttrsObject +from django.utils.translation import gettext_lazy class AppWorkflowManager(models.Manager): @@ -23,9 +24,9 @@ def get_due(self): class AppWorkflowConfig(models.Model): FORM_MODE_CHOICES = [ - (const.FORM_MODE_HUMAN, "Human: Answer each question individually and submit form"), - (const.FORM_MODE_NO_SUBMIT, "No Submit: Answer all questions but don't submit the form"), - (const.FORM_MODE_IGNORE, "Ignore: Do not complete or submit forms"), + (const.FORM_MODE_HUMAN, gettext_lazy("Human: Answer each question individually and submit form")), + (const.FORM_MODE_NO_SUBMIT, gettext_lazy("No Submit: Answer all questions but don't submit the form")), + (const.FORM_MODE_IGNORE, gettext_lazy("Ignore: Do not complete or submit forms")), ] name = models.CharField(max_length=255) domain = models.CharField(max_length=255) @@ -34,11 +35,12 @@ class AppWorkflowConfig(models.Model): django_user = models.ForeignKey(User, on_delete=models.CASCADE) workflow = AttrsObject(AppWorkflow) form_mode = models.CharField(max_length=255, choices=FORM_MODE_CHOICES) - sync_before_run = models.BooleanField(default=False, help_text="Sync user data before running") - run_every = models.IntegerField(help_text="Number of minutes between runs", null=True, blank=True) + sync_before_run = models.BooleanField(default=False, help_text=gettext_lazy("Sync user data before running")) + run_every = models.IntegerField( + help_text=gettext_lazy("Number of minutes between runs"), null=True, blank=True) last_run = models.DateTimeField(null=True, blank=True) notification_emails = ArrayField( - models.EmailField(), default=list, help_text="Emails to notify on failure", blank=True + models.EmailField(), default=list, help_text=gettext_lazy("Emails to notify on failure"), blank=True ) objects = AppWorkflowManager() diff --git a/corehq/apps/app_execution/static/app_execution/js/workflow_charts.js b/corehq/apps/app_execution/static/app_execution/js/workflow_charts.js new file mode 100644 index 000000000000..6312a3b44766 --- /dev/null +++ b/corehq/apps/app_execution/static/app_execution/js/workflow_charts.js @@ -0,0 +1,100 @@ +'use strict'; +hqDefine("app_execution/js/workflow_charts", [ + 'jquery', + 'moment/moment', + 'd3/d3.min', + 'nvd3/nv.d3.latest.min', // version 1.1.10 has a bug that affects line charts with multiple series +], function ( + $, moment, d3, nv +) { + + function getSeries(data, includeSeries) { + return includeSeries.map((seriesMeta) => { + return { + // include key in the label to differentiate between series with the same label + key: `${data.label}${seriesMeta.label}[${data.key}]`, + values: 
+                values: data.values.map((item) => {
+                    return {
+                        x: moment(item.date),
+                        y: item[seriesMeta.key],
+                    };
+                }),
+            };
+        });
+    }
+
+    function buildChart(yLabel) {
+        let chart = nv.models.lineChart()
+            .showYAxis(true)
+            .showXAxis(true);
+
+        chart.yAxis
+            .axisLabel(yLabel);
+        chart.forceY(0);
+        chart.xScale(d3.time.scale());
+        chart.margin({left: 80, bottom: 100});
+        chart.xAxis.rotateLabels(-45)
+            .tickFormat(function (d) {
+                return moment(d).format("MMM DD [@] HH");
+            });
+
+        nv.utils.windowResize(chart.update);
+        return chart;
+    }
+
+    function setupTimingChart(data, includeSeries) {
+        const timingSeries = data.timing.flatMap((series) => getSeries(series, includeSeries));
+
+        nv.addGraph(() => {
+            let chart = buildChart(gettext("Seconds"));
+            chart.yAxis.tickFormat(d3.format(".1f"));
+            // remove the key from the label
+            chart.legend.key((d) => d.key.split("[")[0]);
+            chart.tooltip.keyFormatter((d) => {
+                return d.split("[")[0];
+            });
+
+            d3.select('#timing_linechart svg')
+                .datum(timingSeries)
+                .call(chart);
+
+            return chart;
+        });
+    }
+
+    function setupStatusChart(data) {
+        const colors = {
+            "Success": "#6dcc66",
+            "Error": "#f44",
+        };
+        let seriesData = data.status.map((series) => {
+            return {
+                key: series.key,
+                values: series.values.map((item) => {
+                    return {
+                        x: moment(item.date),
+                        y: item.count,
+                    };
+                }),
+                color: colors[series.key],
+            };
+        });
+
+        nv.addGraph(() => {
+            let chart = buildChart(gettext("Chart"));
+
+            d3.select('#status_barchart svg')
+                .datum(seriesData)
+                .call(chart);
+
+            return chart;
+        });
+    }
+
+    $(document).ready(function () {
+        const data = JSON.parse(document.getElementById('chart_data').textContent);
+        const includeSeries = JSON.parse(document.getElementById('timingSeries').textContent);
+        setupTimingChart(data, includeSeries);
+        setupStatusChart(data);
+    });
+});
diff --git a/corehq/apps/app_execution/static/app_execution/js/workflow_logs.js b/corehq/apps/app_execution/static/app_execution/js/workflow_logs.js
index 0bcca816ac71..c07d5b0e9527 100644
--- a/corehq/apps/app_execution/static/app_execution/js/workflow_logs.js
+++ b/corehq/apps/app_execution/static/app_execution/js/workflow_logs.js
@@ -1,28 +1,51 @@
+'use strict';
 hqDefine("app_execution/js/workflow_logs", [
     'jquery',
     'knockout',
     'hqwebapp/js/initial_page_data',
-    'app_execution/js/workflow_timing_chart',
+    'hqwebapp/js/tempus_dominus',
+    'app_execution/js/workflow_charts',
     'hqwebapp/js/bootstrap5/components.ko',
-], function ($, ko, initialPageData) {
+], function ($, ko, initialPageData, hqTempusDominus) {
     let logsModel = function () {
         let self = {};
+        self.statusFilter = ko.observable("");
+        let allDatesText = gettext("Show All Dates");
+        self.dateRange = ko.observable(allDatesText);
         self.items = ko.observableArray();
         self.totalItems = ko.observable(initialPageData.get('total_items'));
         self.perPage = ko.observable(25);
         self.goToPage = function (page) {
             let params = {page: page, per_page: self.perPage()};
             const url = initialPageData.reverse('app_execution:logs_json');
+            if (self.statusFilter()) {
+                params.status = self.statusFilter();
+            }
+            if (self.dateRange() && self.dateRange() !== allDatesText) {
+                const separator = hqTempusDominus.getDateRangeSeparator(),
+                    dates = self.dateRange().split(separator);
+                params.startDate = dates[0];
+                params.endDate = dates[1] || dates[0];
+            }
             $.getJSON(url, params, function (data) {
                 self.items(data.logs);
             });
         };
 
+        self.filter = ko.computed(() => {
+            self.statusFilter();
+            if (self.dateRange().includes(hqTempusDominus.getDateRangeSeparator())) {
+                self.goToPage(1);
+            }
+        }).extend({throttle: 500});
+
         self.onLoad = function () {
             self.goToPage(1);
         };
 
+        hqTempusDominus.createDefaultDateRangePicker(document.getElementById('id_date_range'));
+
         return self;
     };
diff --git a/corehq/apps/app_execution/static/app_execution/js/workflow_timing_chart.js b/corehq/apps/app_execution/static/app_execution/js/workflow_timing_chart.js
deleted file mode 100644
index 63fa7b9e7f91..000000000000
--- a/corehq/apps/app_execution/static/app_execution/js/workflow_timing_chart.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// /* globals moment */
-hqDefine("app_execution/js/workflow_timing_chart", [
-    'jquery',
-    'moment/moment',
-    'd3/d3.min',
-    'nvd3/nv.d3.latest.min',  // version 1.1.10 has a bug that affects line charts with multiple series
-], function (
-    $, moment, d3, nv
-) {
-
-    function getSeries(data, includeSeries) {
-        return includeSeries.map((seriesMeta) => {
-            return {
-                // include key in the label to differentiate between series with the same label
-                label: `${data.label}${seriesMeta.label}`,
-                key: `${data.label}${seriesMeta.label}[${data.key}]`,
-                values: data.values.map((item) => {
-                    return {
-                        x: moment(item.date),
-                        y: item[seriesMeta.key],
-                    };
-                }),
-            };
-        });
-    }
-
-    function setupLineChart(data, includeSeries) {
-        const timingSeries = data.flatMap((series) => getSeries(series, includeSeries));
-        nv.addGraph(function () {
-            let chart = nv.models.lineChart()
-                .showYAxis(true)
-                .showXAxis(true);
-
-            chart.yAxis
-                .axisLabel('Seconds')
-                .tickFormat(d3.format(".1f"));
-            chart.forceY(0);
-            chart.xScale(d3.time.scale());
-            chart.margin({left: 80, bottom: 100});
-            chart.xAxis.rotateLabels(-45)
-                .tickFormat(function (d) {
-                    return moment(d).format("MMM DD [@] HH");
-                });
-
-            // remove the key from the label
-            chart.legend.key((d) => d.key.split("[")[0]);
-            chart.tooltip.keyFormatter((d) => {
-                return d.split("[")[0];
-            });
-
-            d3.select('#timing_linechart svg')
-                .datum(timingSeries)
-                .call(chart);
-
-            nv.utils.windowResize(chart.update);
-            return chart;
-        });
-
-    }
-
-    $(document).ready(function () {
-        const chartData = JSON.parse(document.getElementById('chart_data').textContent);
-        const includeSeries = JSON.parse(document.getElementById('includeSeries').textContent);
-        setupLineChart(chartData, includeSeries);
-    });
-});
diff --git a/corehq/apps/app_execution/templates/app_execution/components/logs.html b/corehq/apps/app_execution/templates/app_execution/components/logs.html
index 254a40b02d93..a0d174af09ca 100644
--- a/corehq/apps/app_execution/templates/app_execution/components/logs.html
+++ b/corehq/apps/app_execution/templates/app_execution/components/logs.html
@@ -1,19 +1,20 @@
+{% load i18n %}
 {{ output }}
 {% if success %}
 {{ workflow_json }}
-          <th>Name</th>
-          <th>App Name</th>
-          <th>User</th>
-          <th>Last Run</th>
-          <th>Last 10 Runs</th>
+          <th>{% translate 'Name' %}</th>
+          <th>{% translate 'App Name' %}</th>
+          <th>{% translate 'User' %}</th>
+          <th>{% translate 'Last Run' %}</th>
+          <th>{% translate 'Last 10 Runs' %}</th>
           <th></th>
@@ -66,7 +72,7 @@
-          <th>Status</th>
-          <th>Started</th>
-          <th>Duration</th>
+          <th>{% translate 'Status' %}</th>
+          <th>{% translate 'Started' %}</th>
+          <th>{% translate 'Duration' %}</th>
           <th></th>
-          <th>Details</th>
+          <th>{% translate 'Details' %}</th>
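
Note on the validation hook added in corehq/apps/api/resources/__init__.py: the docstring describes the contract, but the diff itself ships no example subclass outside the tests. Below is a minimal sketch of how a resource might opt in. The resource class name and the validation rule are hypothetical (the rule is modeled on the lookup table tests above), and it assumes the hook lives on the tastypie base resource shown in this diff, called HqBaseResource here for illustration:

    from django.core.exceptions import ValidationError

    class ExampleResource(HqBaseResource):  # hypothetical subclass

        def validate_deserialized_data(self, data):
            # Called by alter_deserialized_detail_data() in the diff above.
            # Raising ValidationError yields an HTTP 400 response whose JSON
            # body is the list of error messages, as the tests exercise.
            for field in data.get("fields", []):
                if "name" in field and "field_name" in field:
                    raise ValidationError(
                        "Specify either 'name' or 'field_name', not both.")

A subclass could equally assign a ``corehq.util.validation.JSONSchemaValidator`` instance here, which the docstring says this hook is designed to work with; the hasattr() check means resources that define no validator are unaffected.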