diff --git a/CHANGES.rst b/CHANGES.rst index 16d4c0cb1b..816536a1ff 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,9 @@ Changelog **Added** +- #1506 Specification non-compliant viewlet in Sample +- #1506 Sample results ranges out-of-date viewlet in Sample +- #1506 Warn icon in analyses when range is not compliant with Specification - #1492 Dynamic Analysis Specifications - #1507 Support for semi-colon character separator in CCEmails field - #1499 Moved navigation portlet into core @@ -36,6 +39,9 @@ Changelog **Fixed** +- #1506 Changes via manage results don't get applied to partitions +- #1506 Fix recursion error when getting dependencies through Calculation +- #1506 setter from ARAnalysisField does no longer return values - #1512 QC Analyses listing appears empty in Sample view - #1510 Error when viewing a Sample w/o Batch as client contact - #1511 Links to partitions for Internal Use are displayed in partitions viewlet diff --git a/bika/lims/api/analysis.py b/bika/lims/api/analysis.py index 0d141a90ab..e466e53600 100644 --- a/bika/lims/api/analysis.py +++ b/bika/lims/api/analysis.py @@ -27,6 +27,8 @@ IResultOutOfRange from zope.component._api import getAdapters +from bika.lims.interfaces.analysis import IRequestAnalysis + def is_out_of_range(brain_or_object, result=_marker): """Checks if the result for the analysis passed in is out of range and/or @@ -148,3 +150,33 @@ def get_formatted_interval(results_range, default=_marker): max_bracket = max_operator == 'leq' and ']' or ')' return "{}{};{}{}".format(min_bracket, min_str, max_str, max_bracket) + + +def is_result_range_compliant(analysis): + """Returns whether the result range from the analysis matches with the + result range for the service counterpart defined in the Sample + """ + if not IRequestAnalysis.providedBy(analysis): + return True + + rr = analysis.getResultsRange() + service_uid = rr.get("uid", None) + if not api.is_uid(service_uid): + return True + + # Compare with Sample + sample = 
analysis.getRequest() + + # If no Specification is set, assume is compliant + specification = sample.getRawSpecification() + if not specification: + return True + + # Compare with the Specification that was initially set to the Sample + sample_rr = sample.getResultsRange(search_by=service_uid) + if not sample_rr: + # This service is not defined in Sample's ResultsRange, we + # assume this *does not* break the compliance + return True + + return rr == sample_rr diff --git a/bika/lims/browser/analyses/view.py b/bika/lims/browser/analyses/view.py index d47a3687c7..dcf2cab9bc 100644 --- a/bika/lims/browser/analyses/view.py +++ b/bika/lims/browser/analyses/view.py @@ -25,11 +25,15 @@ from DateTime import DateTime from Products.Archetypes.config import REFERENCE_CATALOG from Products.CMFPlone.utils import safe_unicode +from plone.memoize import view as viewcache +from zope.component import getAdapters + from bika.lims import api from bika.lims import bikaMessageFactory as _ from bika.lims import logger from bika.lims.api.analysis import get_formatted_interval from bika.lims.api.analysis import is_out_of_range +from bika.lims.api.analysis import is_result_range_compliant from bika.lims.browser.bika_listing import BikaListingView from bika.lims.catalog import CATALOG_ANALYSIS_LISTING from bika.lims.config import LDL @@ -51,8 +55,6 @@ from bika.lims.utils import get_link from bika.lims.utils import t from bika.lims.utils.analysis import format_uncertainty -from plone.memoize import view as viewcache -from zope.component import getAdapters class AnalysesView(BikaListingView): @@ -1033,13 +1035,25 @@ def _folder_item_specifications(self, analysis_brain, item): # Show an icon if out of range out_range, out_shoulders = is_out_of_range(analysis_brain) - if not out_range: - return - # At least is out of range - img = get_image("exclamation.png", title=_("Result out of range")) - if not out_shoulders: - img = get_image("warning.png", title=_("Result in shoulder range")) - 
self._append_html_element(item, "Result", img) + if out_range: + msg = _("Result out of range") + img = get_image("exclamation.png", title=msg) + if not out_shoulders: + msg = _("Result in shoulder range") + img = get_image("warning.png", title=msg) + self._append_html_element(item, "Result", img) + + # Show an icon if the analysis range is different from the Sample spec + if IAnalysisRequest.providedBy(self.context): + analysis = self.get_object(analysis_brain) + if not is_result_range_compliant(analysis): + service_uid = analysis_brain.getServiceUID + original = self.context.getResultsRange(search_by=service_uid) + original = get_formatted_interval(original, "") + msg = _("Result range is different from Specification: {}" + .format(original)) + img = get_image("warning.png", title=msg) + self._append_html_element(item, "Specification", img) def _folder_item_verify_icons(self, analysis_brain, item): """Set the analysis' verification icons to the item passed in. diff --git a/bika/lims/browser/analysisrequest/add2.py b/bika/lims/browser/analysisrequest/add2.py index 460398c965..81a26a6320 100644 --- a/bika/lims/browser/analysisrequest/add2.py +++ b/bika/lims/browser/analysisrequest/add2.py @@ -1670,7 +1670,7 @@ def ajax_submit(self): client, self.request, record, - specifications=specifications + results_ranges=specifications ) except (KeyError, RuntimeError) as e: actions.resume() diff --git a/bika/lims/browser/analysisrequest/manage_analyses.py b/bika/lims/browser/analysisrequest/manage_analyses.py index 1db380950c..cca18dafb8 100644 --- a/bika/lims/browser/analysisrequest/manage_analyses.py +++ b/bika/lims/browser/analysisrequest/manage_analyses.py @@ -142,12 +142,24 @@ def show_ar_specs(self): @view.memoize def get_results_range(self): - """Get the results Range from the AR + """Get the results Range from the Sample, but gives priority to the + result ranges set in analyses. 
This guarantees that result ranges for + already present analyses are not overriden after form submission """ - spec = self.context.getResultsRange() - if spec: - return dicts_to_dict(spec, "keyword") - return ResultsRangeDict() + # Extract the result ranges from Sample analyses + analyses = self.analyses.values() + analyses_rrs = map(lambda an: an.getResultsRange(), analyses) + analyses_rrs = filter(None, analyses_rrs) + rrs = dicts_to_dict(analyses_rrs, "keyword") + + # Bail out ranges from Sample that are already present in analyses + sample_rrs = self.context.getResultsRange() + sample_rrs = filter(lambda rr: rr["keyword"] not in rrs, sample_rrs) + sample_rrs = dicts_to_dict(sample_rrs, "keyword") + + # Extend result ranges with those from Sample + rrs.update(sample_rrs) + return rrs @view.memoize def get_currency_symbol(self): diff --git a/bika/lims/browser/analysisspec.py b/bika/lims/browser/analysisspec.py deleted file mode 100644 index 3608e2cd84..0000000000 --- a/bika/lims/browser/analysisspec.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -# -# This file is part of SENAITE.CORE. -# -# SENAITE.CORE is free software: you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright 2018-2019 by it's authors. -# Some rights reserved, see README and LICENSE. 
- -from bika.lims.config import POINTS_OF_CAPTURE -from bika.lims.interfaces import IAnalysisSpec -from bika.lims.interfaces import IJSONReadExtender -from zope.component import adapts -from zope.interface import implements - -class JSONReadExtender(object): - """Adds the UID to the ResultsRange dict. This will go away - when we stop using keywords for this stuff. - """ - - implements(IJSONReadExtender) - adapts(IAnalysisSpec) - - def __init__(self, context): - self.context = context - - def __call__(self, request, data): - bsc = self.context.bika_setup_catalog - rr = [] - for i, x in enumerate(data.get("ResultsRange", [])): - keyword = x.get("keyword") - proxies = bsc(portal_type="AnalysisService", getKeyword=keyword) - if proxies: - data['ResultsRange'][i]['uid'] = proxies[0].UID diff --git a/bika/lims/browser/analysisspec.zcml b/bika/lims/browser/analysisspec.zcml deleted file mode 100644 index d3951601f2..0000000000 --- a/bika/lims/browser/analysisspec.zcml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - diff --git a/bika/lims/browser/configure.zcml b/bika/lims/browser/configure.zcml index f7bcd77559..a7eb065517 100644 --- a/bika/lims/browser/configure.zcml +++ b/bika/lims/browser/configure.zcml @@ -11,7 +11,6 @@ - diff --git a/bika/lims/browser/fields/__init__.py b/bika/lims/browser/fields/__init__.py index d366dca597..4fb1102419 100644 --- a/bika/lims/browser/fields/__init__.py +++ b/bika/lims/browser/fields/__init__.py @@ -30,3 +30,5 @@ from .proxyfield import ProxyField from .uidreferencefield import UIDReferenceField from .emailsfield import EmailsField +from .resultrangefield import ResultRangeField +from .resultsrangesfield import ResultsRangesField diff --git a/bika/lims/browser/fields/aranalysesfield.py b/bika/lims/browser/fields/aranalysesfield.py index f3fbfd6082..49b0cf5acf 100644 --- a/bika/lims/browser/fields/aranalysesfield.py +++ b/bika/lims/browser/fields/aranalysesfield.py @@ -25,15 +25,19 @@ from Products.Archetypes.Registry import registerField 
from Products.Archetypes.public import Field from Products.Archetypes.public import ObjectField +from zope.interface import alsoProvides from zope.interface import implements +from zope.interface import noLongerProvides from bika.lims import api from bika.lims import logger from bika.lims.api.security import check_permission from bika.lims.catalog import CATALOG_ANALYSIS_LISTING +from bika.lims.catalog import SETUP_CATALOG from bika.lims.interfaces import IARAnalysesField from bika.lims.interfaces import IAnalysis from bika.lims.interfaces import IAnalysisService +from bika.lims.interfaces import IInternalUse from bika.lims.interfaces import ISubmitted from bika.lims.permissions import AddAnalysis from bika.lims.utils.analysis import create_analysis @@ -127,139 +131,171 @@ def set(self, instance, items, prices=None, specs=None, hidden=None, **kw): services = filter(None, map(self._to_service, items)) # Calculate dependencies - # FIXME Infinite recursion error possible here, if the formula includes - # the Keyword of the Service that includes the Calculation dependencies = map(lambda s: s.getServiceDependencies(), services) dependencies = list(itertools.chain.from_iterable(dependencies)) # Merge dependencies and services services = set(services + dependencies) - # Modify existing AR specs with new form values of selected analyses. 
- self._update_specs(instance, specs) - - # Create a mapping of Service UID -> Hidden status - if hidden is None: - hidden = [] - hidden = dict(map(lambda d: (d.get("uid"), d.get("hidden")), hidden)) - - # Ensure we have a prices dictionary - if prices is None: - prices = dict() + # Modify existing AR specs with new form values of selected analyses + specs = self.resolve_specs(instance, specs) # Add analyses - new_analyses = map(lambda service: - self.add_analysis(instance, service, prices, hidden), - services) - new_analyses = filter(None, new_analyses) + params = dict(prices=prices, hidden=hidden, specs=specs) + map(lambda serv: self.add_analysis(instance, serv, **params), services) - # Remove analyses - # Since Manage Analyses view displays the analyses from partitions, we - # also need to take them into consideration here. Analyses from - # ancestors can be omitted. + # Get all analyses (those from descendants included) analyses = instance.objectValues("Analysis") analyses.extend(self.get_analyses_from_descendants(instance)) - # Service UIDs - service_uids = map(api.get_uid, services) + # Bail out those not in services list or submitted + uids = map(api.get_uid, services) + to_remove = filter(lambda an: an.getServiceUID() not in uids, analyses) + to_remove = filter(lambda an: not ISubmitted.providedBy(an), to_remove) - # Assigned Attachments - assigned_attachments = [] + # Remove analyses + map(self.remove_analysis, to_remove) - for analysis in analyses: - service_uid = analysis.getServiceUID() + def resolve_specs(self, instance, results_ranges): + """Returns a dictionary where the key is the service_uid and the value + is its results range. 
The dictionary is made by extending the + results_ranges passed-in with the Sample's ResultsRanges (a copy of the + specifications initially set) + """ + rrs = results_ranges or [] - # Skip if the Service is selected - if service_uid in service_uids: - continue + # Sample's Results ranges + sample_rrs = instance.getResultsRange() - # Skip non-open Analyses - if ISubmitted.providedBy(analysis): - continue + # Resolve results_ranges passed-in to make sure they contain uid + rrs = map(lambda rr: self.resolve_uid(rr), rrs) - # Remember assigned attachments - # https://github.com/senaite/senaite.core/issues/1025 - assigned_attachments.extend(analysis.getAttachment()) - analysis.setAttachment([]) + # Append those from sample that are missing in the ranges passed-in + service_uids = map(lambda rr: rr["uid"], rrs) + rrs.extend(filter(lambda rr: rr["uid"] not in service_uids, sample_rrs)) - # If it is assigned to a worksheet, unassign it before deletion. - worksheet = analysis.getWorksheet() - if worksheet: - worksheet.removeAnalysis(analysis) + # Create a dict for easy access to results ranges + return dict(map(lambda rr: (rr["uid"], rr), rrs)) - # Remove the analysis - # Note the analysis might belong to a partition - analysis.aq_parent.manage_delObjects(ids=[api.get_id(analysis)]) + def resolve_uid(self, result_range): + """Resolves the uid key for the result_range passed in if it does not + exist when contains a keyword + """ + value = result_range.copy() + uid = value.get("uid") + if api.is_uid(uid) and uid != "0": + return value + + # uid key does not exist or is not valid, try to infere from keyword + keyword = value.get("keyword") + if keyword: + query = dict(portal_type="AnalysisService", getKeyword=keyword) + brains = api.search(query, SETUP_CATALOG) + if len(brains) == 1: + uid = api.get_uid(brains[0]) + value["uid"] = uid + return value + + def add_analysis(self, instance, service, **kwargs): + service_uid = api.get_uid(service) - # Remove orphaned attachments - 
for attachment in assigned_attachments: - # only delete attachments which are no further linked - if not attachment.getLinkedAnalyses(): - logger.info( - "Deleting attachment: {}".format(attachment.getId())) - attachment_id = api.get_id(attachment) - api.get_parent(attachment).manage_delObjects(attachment_id) + # Ensure we have suitable parameters + specs = kwargs.get("specs") or {} - return new_analyses + # Get the hidden status for the service + hidden = kwargs.get("hidden") or [] + hidden = filter(lambda d: d.get("uid") == service_uid, hidden) + hidden = hidden and hidden[0].get("hidden") or service.getHidden() - def add_analysis(self, instance, service, prices, hidden): - service_uid = api.get_uid(service) - new_analysis = False + # Get the price for the service + prices = kwargs.get("prices") or {} + price = prices.get(service_uid) or service.getPrice() # Gets the analysis or creates the analysis for this service - # Note this analysis might not belong to this current instance, but - # from a descendant (partition) - analysis = self.resolve_analysis(instance, service) - if not analysis: + # Note this returns a list, because is possible to have multiple + # partitions with same analysis + analyses = self.resolve_analyses(instance, service) + if not analyses: # Create the analysis - new_analysis = True keyword = service.getKeyword() logger.info("Creating new analysis '{}'".format(keyword)) analysis = create_analysis(instance, service) + analyses.append(analysis) - # Set the hidden status - analysis.setHidden(hidden.get(service_uid, False)) + skip = ["cancelled", "retracted", "rejected"] + for analysis in analyses: + # Skip analyses to better not modify + if api.get_review_status(analysis) in skip: + continue - # Set the price of the Analysis - analysis.setPrice(prices.get(service_uid, service.getPrice())) + # Set the hidden status + analysis.setHidden(hidden) - # Only return the analysis if is a new one - if new_analysis: - return analysis + # Set the price of 
the Analysis + analysis.setPrice(price) - return None + # Set the internal use status + parent_sample = analysis.getRequest() + analysis.setInternalUse(parent_sample.getInternalUse()) + + # Set the result range to the analysis + analysis_rr = specs.get(service_uid) or analysis.getResultsRange() + analysis.setResultsRange(analysis_rr) + analysis.reindexObject() - def resolve_analysis(self, instance, service): - """Resolves an analysis for the service and instance + def remove_analysis(self, analysis): + """Removes a given analysis from the instance """ + # Remember assigned attachments + # https://github.com/senaite/senaite.core/issues/1025 + attachments = analysis.getAttachment() + analysis.setAttachment([]) + + # If assigned to a worksheet, unassign it before deletion + worksheet = analysis.getWorksheet() + if worksheet: + worksheet.removeAnalysis(analysis) + + # Remove the analysis + # Note the analysis might belong to a partition + analysis.aq_parent.manage_delObjects(ids=[api.get_id(analysis)]) + + # Remove orphaned attachments + for attachment in attachments: + if not attachment.getLinkedAnalyses(): + # only delete attachments which are no further linked + logger.info( + "Deleting attachment: {}".format(attachment.getId())) + attachment_id = api.get_id(attachment) + api.get_parent(attachment).manage_delObjects(attachment_id) + + def resolve_analyses(self, instance, service): + """Resolves analyses for the service and instance + It returns a list, cause for a given sample, multiple analyses for same + service can exist due to the possibility of having multiple partitions + """ + analyses = [] + # Does the analysis exists in this instance already? 
- analysis = self.get_from_instance(instance, service) - if analysis: - keyword = service.getKeyword() - logger.info("Analysis for '{}' already exists".format(keyword)) - return analysis + instance_analyses = self.get_from_instance(instance, service) + if instance_analyses: + analyses.extend(instance_analyses) # Does the analysis exists in an ancestor? from_ancestor = self.get_from_ancestor(instance, service) - if from_ancestor: + for ancestor_analysis in from_ancestor: # Move the analysis into this instance. The ancestor's # analysis will be masked otherwise - analysis_id = api.get_id(from_ancestor) + analysis_id = api.get_id(ancestor_analysis) logger.info("Analysis {} is from an ancestor".format(analysis_id)) - cp = from_ancestor.aq_parent.manage_cutObjects(analysis_id) + cp = ancestor_analysis.aq_parent.manage_cutObjects(analysis_id) instance.manage_pasteObjects(cp) - return instance._getOb(analysis_id) + analyses.append(instance._getOb(analysis_id)) - # Does the analysis exists in a descendant? + # Does the analysis exists in descendants? from_descendant = self.get_from_descendant(instance, service) - if from_descendant: - # The analysis already exists in a partition, keep it. 
The - # analysis from current instance will be masked otherwise - analysis_id = api.get_id(from_descendant) - logger.info("Analysis {} is from a descendant".format(analysis_id)) - return from_descendant - - return None + analyses.extend(from_descendant) + return analyses def get_analyses_from_descendants(self, instance): """Returns all the analyses from descendants @@ -270,48 +306,39 @@ def get_analyses_from_descendants(self, instance): return analyses def get_from_instance(self, instance, service): - """Returns an analysis for the given service from the instance + """Returns analyses for the given service from the instance """ service_uid = api.get_uid(service) - for analysis in instance.objectValues("Analysis"): - if analysis.getServiceUID() == service_uid: - return analysis - return None + analyses = instance.objectValues("Analysis") + # Filter those analyses with same keyword. Note that a Sample can + # contain more than one analysis with same keyword because of retests + return filter(lambda an: an.getServiceUID() == service_uid, analyses) def get_from_ancestor(self, instance, service): - """Returns an analysis for the given service from ancestors + """Returns analyses for the given service from ancestors """ ancestor = instance.getParentAnalysisRequest() if not ancestor: - return None + return [] - analysis = self.get_from_instance(ancestor, service) - return analysis or self.get_from_ancestor(ancestor, service) + analyses = self.get_from_instance(ancestor, service) + return analyses or self.get_from_ancestor(ancestor, service) def get_from_descendant(self, instance, service): - """Returns an analysis for the given service from descendants + """Returns analyses for the given service from descendants """ + analyses = [] for descendant in instance.getDescendants(): # Does the analysis exists in the current descendant? 
- analysis = self.get_from_instance(descendant, service) - if analysis: - return analysis + descendant_analyses = self.get_from_instance(descendant, service) + if descendant_analyses: + analyses.extend(descendant_analyses) # Search in descendants from current descendant - analysis = self.get_from_descendant(descendant, service) - if analysis: - return analysis - - return None + from_descendant = self.get_from_descendant(descendant, service) + analyses.extend(from_descendant) - def _get_services(self, full_objects=False): - """Fetch and return analysis service objects - """ - bsc = api.get_tool("bika_setup_catalog") - brains = bsc(portal_type="AnalysisService") - if full_objects: - return map(api.get_object, brains) - return brains + return analyses def _to_service(self, thing): """Convert to Analysis Service @@ -345,33 +372,6 @@ def _to_service(self, thing): "The object will be dismissed.".format(portal_type)) return None - def _update_specs(self, instance, specs): - """Update AR specifications - - :param instance: Analysis Request - :param specs: List of Specification Records - """ - - if specs is None: - return - - # N.B. 
we copy the records here, otherwise the spec will be written to - # the attached specification of this AR - rr = {item["keyword"]: item.copy() - for item in instance.getResultsRange()} - for spec in specs: - keyword = spec.get("keyword") - if keyword in rr: - # overwrite the instance specification only, if the specific - # analysis spec has min/max values set - if all([spec.get("min"), spec.get("max")]): - rr[keyword].update(spec) - else: - rr[keyword] = spec - else: - rr[keyword] = spec - return instance.setResultsRange(rr.values()) - registerField(ARAnalysesField, title="Analyses", diff --git a/bika/lims/browser/fields/configure.zcml b/bika/lims/browser/fields/configure.zcml index 9ebfc6645e..c2a8a090a9 100644 --- a/bika/lims/browser/fields/configure.zcml +++ b/bika/lims/browser/fields/configure.zcml @@ -1,7 +1,15 @@ + + + diff --git a/bika/lims/browser/fields/resultrangefield.py b/bika/lims/browser/fields/resultrangefield.py new file mode 100644 index 0000000000..446d2b317b --- /dev/null +++ b/bika/lims/browser/fields/resultrangefield.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# +# This file is part of SENAITE.CORE. +# +# SENAITE.CORE is free software: you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright 2018-2020 by it's authors. +# Some rights reserved, see README and LICENSE. 
+ +from operator import itemgetter + +from Products.ATExtensions.field import RecordField +from Products.Archetypes.Registry import registerField +from Products.Archetypes.interfaces import IFieldDefaultProvider +from zope.interface import implements + +from bika.lims import bikaMessageFactory as _ +from bika.lims.interfaces.analysis import IRequestAnalysis + + +# A tuple of (subfield_id, subfield_label,) +SUB_FIELDS = ( + ("keyword", _("Analysis Service")), + ("min_operator", _("Min operator")), + ("min", _('Min')), + ("max_operator", _("Max operator")), + ("max", _('Max')), + ("warn_min", _('Min warn')), + ("warn_max", _('Max warn')), + ("hidemin", _('< Min')), + ("hidemax", _('> Max')), + ("rangecomment", _('Range Comment')), +) + + +class ResultRangeField(RecordField): + """A field that stores a results range + """ + _properties = RecordField._properties.copy() + _properties.update({ + "type": "results_range_field", + "subfields": map(itemgetter(0), SUB_FIELDS), + "subfield_labels": dict(SUB_FIELDS), + }) + + def set(self, instance, value, **kwargs): + from bika.lims.content.analysisspec import ResultsRangeDict + if isinstance(value, ResultsRangeDict): + # Better store a built-in dict so it will always be available even + # if ResultsRangeDict is removed or changed + value = dict(value) + + super(ResultRangeField, self).set(instance, value, **kwargs) + + def get(self, instance, **kwargs): + from bika.lims.content.analysisspec import ResultsRangeDict + value = super(ResultRangeField, self).get(instance, **kwargs) + if value: + return ResultsRangeDict(dict(value.items())) + return {} + + +registerField(ResultRangeField, title="ResultRange", + description="Used for storing a result range",) + + +class DefaultResultsRangeProvider(object): + """Default Results Range provider for analyses + This is used for backwards-compatibility for when the analysis' ResultsRange + was obtained directly from Sample's ResultsRanges field, before this: + 
https://github.com/senaite/senaite.core/pull/1506 + """ + implements(IFieldDefaultProvider) + + def __init__(self, context): + self.context = context + + def __call__(self): + """Get the default value. + """ + if not IRequestAnalysis.providedBy(self.context): + return {} + + # Get the AnalysisRequest to look at + analysis = self.context + sample = analysis.getRequest() + if not sample: + return {} + + # Search by keyword + field = sample.getField("ResultsRange") + keyword = analysis.getKeyword() + rr = field.get(sample, search_by=keyword) + if rr: + return rr + + # Try with uid (this shouldn't be necessary) + service_uid = analysis.getServiceUID() + return field.get(sample, search_by=service_uid) or {} diff --git a/bika/lims/browser/fields/resultsrangesfield.py b/bika/lims/browser/fields/resultsrangesfield.py new file mode 100644 index 0000000000..3e429274f4 --- /dev/null +++ b/bika/lims/browser/fields/resultsrangesfield.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# +# This file is part of SENAITE.CORE. +# +# SENAITE.CORE is free software: you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright 2018-2020 by it's authors. +# Some rights reserved, see README and LICENSE. 
+ +from operator import itemgetter + +from Products.ATExtensions.field import RecordsField +from Products.Archetypes.Registry import registerField + +from bika.lims import api +from bika.lims.browser.fields.resultrangefield import SUB_FIELDS +from bika.lims.browser.widgets import AnalysisSpecificationWidget +from bika.lims.catalog import SETUP_CATALOG + + +class ResultsRangesField(RecordsField): + """A field that stores a list of results ranges + """ + _properties = RecordsField._properties.copy() + _properties.update({ + "type": "specifications", + "subfields": map(itemgetter(0), SUB_FIELDS), + "subfield_labels": dict(SUB_FIELDS), + "subfield_validators": { + "min": "analysisspecs_validator", + "max": "analysisspecs_validator", + }, + "required_subfields": ("keyword", ), + "widget": AnalysisSpecificationWidget, + }) + + def get(self, instance, **kwargs): + values = super(ResultsRangesField, self).get(instance, **kwargs) + + # If a keyword or an uid has been specified, return the result range + # for that uid or keyword only + if "search_by" in kwargs: + uid_or_keyword = kwargs.get("search_by") + if uid_or_keyword: + return self.getResultRange(values, uid_or_keyword) or {} + return {} + + # Convert the dict items to ResultRangeDict for easy handling + from bika.lims.content.analysisspec import ResultsRangeDict + return map(lambda val: ResultsRangeDict(dict(val.items())), values) + + def getResultRange(self, values, uid_keyword_service): + if not uid_keyword_service: + return None + + if api.is_object(uid_keyword_service): + uid_keyword_service = api.get_uid(uid_keyword_service) + + key = "keyword" + if api.is_uid(uid_keyword_service) and uid_keyword_service != "0": + # We always assume a uid of "0" refers to portal + key = "uid" + + # Find out the item for the given uid/keyword + from bika.lims.content.analysisspec import ResultsRangeDict + value = filter(lambda v: v.get(key) == uid_keyword_service, values) + return value and 
ResultsRangeDict(dict(value[0].items())) or None + + def _to_dict(self, value): + """Convert the records to persistent dictionaries + """ + # Resolve items to guarantee all them have the key uid + value = super(ResultsRangesField, self)._to_dict(value) + return map(self.resolve_uid, value) + + def resolve_uid(self, raw_dict): + """Returns a copy of the raw dictionary passed in, but with additional + key "uid". It's value is inferred from "keyword" if present + """ + value = raw_dict.copy() + uid = value.get("uid") + if api.is_uid(uid) and uid != "0": + return value + + # uid key does not exist or is not valid, try to infere from keyword + keyword = value.get("keyword") + if keyword: + query = dict(portal_type="AnalysisService", getKeyword=keyword) + brains = api.search(query, SETUP_CATALOG) + if len(brains) == 1: + uid = api.get_uid(brains[0]) + value["uid"] = uid + return value + + +registerField(ResultsRangesField, title="ResultsRanges", + description="Used for storing a results ranges",) diff --git a/bika/lims/browser/header_table.py b/bika/lims/browser/header_table.py index da66866834..177ba54902 100644 --- a/bika/lims/browser/header_table.py +++ b/bika/lims/browser/header_table.py @@ -24,6 +24,7 @@ from bika.lims import logger from bika.lims.api.security import check_permission from bika.lims.browser import BrowserView +from bika.lims.interfaces import IAnalysisRequestWithPartitions from bika.lims.interfaces import IHeaderTableFieldRenderer from bika.lims.utils import t from plone.memoize import view as viewcache @@ -82,6 +83,12 @@ def __call__(self): self.context.plone_utils.addPortalMessage(message, "info") return self.template() + @viewcache.memoize + def is_primary_with_partitions(self): + """Check if the Sample is a primary with partitions + """ + return IAnalysisRequestWithPartitions.providedBy(self.context) + @viewcache.memoize def is_edit_allowed(self): """Check permission 'ModifyPortalContent' on the context diff --git 
a/bika/lims/browser/partition_magic.py b/bika/lims/browser/partition_magic.py index bb829d8829..f55baf715e 100644 --- a/bika/lims/browser/partition_magic.py +++ b/bika/lims/browser/partition_magic.py @@ -86,8 +86,6 @@ def __call__(self): # The creation of partitions w/o analyses is allowed. Maybe the # user wants to add the analyses later manually or wants to keep # this partition stored in a freezer for some time - # Note we set "remove_primary_analyses" to False cause we want - # user to be able to add same analyses to different partitions. analyses_uids = partition.get("analyses", []) partition = create_partition( request=self.request, @@ -96,7 +94,6 @@ def __call__(self): container=container_uid, preservation=preservation_uid, analyses=analyses_uids, - remove_primary_analyses=False, internal_use=internal_use, ) partitions.append(partition) @@ -112,9 +109,6 @@ def __call__(self): # If no partitions were created, show a warning message return self.redirect(message=_("No partitions were created")) - # Remove analyses from primary Analysis Requests - self.remove_primary_analyses() - message = _("Created {} partitions: {}".format( len(partitions), ", ".join(map(api.get_title, partitions)))) return self.redirect(message=message) @@ -133,14 +127,6 @@ def push_primary_analyses_for_removal(self, analysis_request, analyses): to_remove.extend(analyses) self.analyses_to_remove[analysis_request] = list(set(to_remove)) - def remove_primary_analyses(self): - """Remove analyses relocated to partitions - """ - for ar, analyses in self.analyses_to_remove.items(): - analyses_ids = list(set(map(api.get_id, analyses))) - ar.manage_delObjects(analyses_ids) - self.analyses_to_remove = dict() - def get_ar_data(self): """Returns a list of AR data """ diff --git a/bika/lims/browser/templates/header_table.pt b/bika/lims/browser/templates/header_table.pt index 22f48884db..d33e5ff89f 100644 --- a/bika/lims/browser/templates/header_table.pt +++ b/bika/lims/browser/templates/header_table.pt 
@@ -12,7 +12,8 @@ @@ -21,22 +22,34 @@ + + @@ -59,17 +73,28 @@ fieldName python:action['fieldName']; field python:context.Schema()[fieldName]; field_macro here/widgets/field/macros/edit; + editable python:mode=='edit'; + primary_bound python:getattr(field, 'primary_bound', False); widget python:field.widget; accessor python:field.getAccessor(context); errors python:{};"> diff --git a/bika/lims/browser/viewlets/analysisrequest.py b/bika/lims/browser/viewlets/analysisrequest.py index 1e4bfd59c3..3582801989 100644 --- a/bika/lims/browser/viewlets/analysisrequest.py +++ b/bika/lims/browser/viewlets/analysisrequest.py @@ -20,7 +20,12 @@ from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from plone.app.layout.viewlets import ViewletBase + +from bika.lims import FieldEditSpecification from bika.lims import api +from bika.lims import logger +from bika.lims.api.analysis import is_result_range_compliant +from bika.lims.api.security import check_permission class InvalidAnalysisRequestViewlet(ViewletBase): @@ -82,3 +87,81 @@ class DetachedPartitionViewlet(ViewletBase): detached from """ template = ViewPageTemplateFile("templates/detached_partition_viewlet.pt") + + +class ResultsRangesOutOfDateViewlet(ViewletBase): + """Print a viewlet that displays if results ranges from Sample are different + from results ranges initially set through Specifications field. If so, this + means the Specification initially set has changed since it was assigned to + the Sample and for new analyses, the ranges defined in the initial + specification ranges will be used instead of the new ones. 
+ """ + + def is_specification_editable(self): + """Returns whether the Specification field is editable or not + """ + return check_permission(FieldEditSpecification, self.context) + + def is_results_ranges_out_of_date(self): + """Returns whether the value for ResultsRange field does not match with + the results ranges that come from the Specification assigned + """ + sample = self.context + sample_rr = sample.getResultsRange() + if not sample_rr: + # No results ranges set to this Sample, do nothing + return False + + specifications = sample.getSpecification() + if not specifications: + # The specification was once assigned, but unassigned later + return False + + spec_rr = specifications.getResultsRange() + + # Omit services not present in current Sample + services = map(lambda an: an.getServiceUID, sample.getAnalyses()) + sample_rr = filter(lambda rr: rr.uid in services, sample_rr) + spec_rr = filter(lambda rr: rr.uid in services, spec_rr) + + return sample_rr != spec_rr + + +class SpecificationNotCompliantViewlet(ViewletBase): + """Print a viewlet that displays if the sample contains analyses that are + not compliant with the Specification initially set (stored in Sample's + ResultsRange field). If so, this means that user changed the results ranges + of the analyses manually, either by adding new ones or by modifying the + existing ones via "Manage analyses" view. And results range for those + analyses are different from the Specification initially set. 
+ """ + + def is_specification_editable(self): + """Returns whether the Specification field is editable or not + """ + return check_permission(FieldEditSpecification, self.context) + + def get_non_compliant_analyses(self): + """Returns the list of analysis keywords from this sample with a + result range set not compliant with the result range of the Sample + """ + non_compliant = [] + skip = ["cancelled", "retracted", "rejected"] + + # Check if the results ranges set to analyses individually remain + # compliant with the Sample's ResultRange + analyses = self.context.getAnalyses(full_objects=True) + for analysis in analyses: + # Skip non-valid/inactive analyses + if api.get_review_status(analysis) in skip: + continue + + if not is_result_range_compliant(analysis): + # Result range for this service has been changed manually, + # it does not match with sample's ResultRange + an_title = api.get_title(analysis) + keyword = analysis.getKeyword() + non_compliant.append("{} ({})".format(an_title, keyword)) + + # Return the list of keywords from non-compliant analyses + return list(set(non_compliant)) diff --git a/bika/lims/browser/viewlets/configure.zcml b/bika/lims/browser/viewlets/configure.zcml index 4a988c283c..7b4dd5bcec 100644 --- a/bika/lims/browser/viewlets/configure.zcml +++ b/bika/lims/browser/viewlets/configure.zcml @@ -192,36 +192,69 @@ + for="bika.lims.interfaces.IAnalysisRequestWithPartitions" + name="bika.lims.primary_ar_viewlet" + class=".analysisrequest.PrimaryAnalysisRequestViewlet" + manager="plone.app.layout.viewlets.interfaces.IAboveContent" + template="templates/primary_ar_viewlet.pt" + permission="zope2.View" + layer="bika.lims.interfaces.IBikaLIMS" + /> + for="bika.lims.interfaces.IAnalysisRequestPartition" + name="bika.lims.partition_ar_viewlet" + class=".analysisrequest.PartitionAnalysisRequestViewlet" + manager="plone.app.layout.viewlets.interfaces.IAboveContent" + template="templates/partition_ar_viewlet.pt" + permission="zope2.View" + 
layer="bika.lims.interfaces.IBikaLIMS" + /> - + + for="bika.lims.interfaces.IAnalysisRequest" + name="bika.lims.detached_partition_viewlet" + class=".analysisrequest.DetachedPartitionViewlet" + manager="plone.app.layout.viewlets.interfaces.IAboveContent" + template="templates/detached_partition_viewlet.pt" + permission="zope2.View" + layer="bika.lims.interfaces.IBikaLIMS" + /> + + + + + + - + + + + diff --git a/bika/lims/browser/viewlets/dynamic_specs.py b/bika/lims/browser/viewlets/dynamic_specs.py index 06c878ebec..72d252fc19 100644 --- a/bika/lims/browser/viewlets/dynamic_specs.py +++ b/bika/lims/browser/viewlets/dynamic_specs.py @@ -18,8 +18,8 @@ # Copyright 2018-2020 by it's authors. # Some rights reserved, see README and LICENSE. -from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from plone.app.layout.viewlets import ViewletBase +from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile class DynamicSpecsViewlet(ViewletBase): @@ -28,3 +28,26 @@ class DynamicSpecsViewlet(ViewletBase): in the xls file from the Dynamic Specification """ template = ViewPageTemplateFile("templates/dynamic_specs_viewlet.pt") + + +class SampleDynamicSpecsViewlet(ViewletBase): + """Displays an informative message in Sample view when the assigned + specification has a dynamic specification assigned, so ranges set manually + might be overriden by the ranges provided in the xls file from the Dynamic + Specification + """ + template = ViewPageTemplateFile( + "templates/sample_dynamic_specs_viewlet.pt") + + def get_dynamic_specification(self): + """Returns the dynamic specification assigned to the Sample via + Specification, but only if the current view is manage analyses + """ + if not self.request.getURL().endswith("analyses"): + # Do not display the viewlet if not in manage analyses + return None + + spec = self.context.getSpecification() + if spec: + return spec.getDynamicAnalysisSpec() + return None diff --git 
a/bika/lims/browser/viewlets/templates/dynamic_specs_viewlet.pt b/bika/lims/browser/viewlets/templates/dynamic_specs_viewlet.pt index 84353a2ae5..4629b47b37 100644 --- a/bika/lims/browser/viewlets/templates/dynamic_specs_viewlet.pt +++ b/bika/lims/browser/viewlets/templates/dynamic_specs_viewlet.pt @@ -10,15 +10,22 @@ - - This Analysis Specification has a Dynamic Specification assigned -

+ + This Analysis Specification has a Dynamic Specification assigned + +

+

Be aware that the ranges provided in the spreadsheet file from the dynamic specification might override the ranges defined in the Specifications list below. - +
+ + Visit the Dynamic Specification for additional information: +   +

diff --git a/bika/lims/browser/viewlets/templates/primary_ar_viewlet.pt b/bika/lims/browser/viewlets/templates/primary_ar_viewlet.pt index bca8cd46cc..7fb5df08c1 100644 --- a/bika/lims/browser/viewlets/templates/primary_ar_viewlet.pt +++ b/bika/lims/browser/viewlets/templates/primary_ar_viewlet.pt @@ -6,20 +6,27 @@
diff --git a/bika/lims/browser/viewlets/templates/resultsranges_out_of_date_viewlet.pt b/bika/lims/browser/viewlets/templates/resultsranges_out_of_date_viewlet.pt new file mode 100644 index 0000000000..dfe1b31dcd --- /dev/null +++ b/bika/lims/browser/viewlets/templates/resultsranges_out_of_date_viewlet.pt @@ -0,0 +1,38 @@ +
+ +
+ +
+ +
+ +

+ + Specification ranges have changed since they were assigned + +

+

+ + New ranges won't be applied to either new or current analyses. + Re-assign the Specification if you want to apply latest changes. + 
+ + Visit the Specification's changes history for additional information: + + +

+
+
+
diff --git a/bika/lims/browser/viewlets/templates/sample_dynamic_specs_viewlet.pt b/bika/lims/browser/viewlets/templates/sample_dynamic_specs_viewlet.pt new file mode 100644 index 0000000000..9965e354d6 --- /dev/null +++ b/bika/lims/browser/viewlets/templates/sample_dynamic_specs_viewlet.pt @@ -0,0 +1,32 @@ + diff --git a/bika/lims/browser/viewlets/templates/specification_non_compliant_viewlet.pt b/bika/lims/browser/viewlets/templates/specification_non_compliant_viewlet.pt new file mode 100644 index 0000000000..d65e48cae9 --- /dev/null +++ b/bika/lims/browser/viewlets/templates/specification_non_compliant_viewlet.pt @@ -0,0 +1,36 @@ +
+ +
+ +
+ +
+ +

+ + Ranges for some analyses are different from the Specification + +

+

+ + The ranges for the following analyses have been manually changed and + they are no longer compliant with the ranges of the Specification: + + +
+ + Re-assign the Specification if you want to restore analysis ranges. + +

+
+
+
diff --git a/bika/lims/browser/widgets/analysisspecificationwidget.py b/bika/lims/browser/widgets/analysisspecificationwidget.py index 96be9ceb24..4b6570a945 100644 --- a/bika/lims/browser/widgets/analysisspecificationwidget.py +++ b/bika/lims/browser/widgets/analysisspecificationwidget.py @@ -29,6 +29,7 @@ from bika.lims.config import MAX_OPERATORS from bika.lims.config import MIN_OPERATORS from bika.lims.permissions import FieldEditSpecification +from bika.lims.utils import dicts_to_dict from bika.lims.utils import get_image from bika.lims.utils import get_link from bika.lims.utils import to_choices @@ -134,7 +135,8 @@ def update(self): """ super(AnalysisSpecificationView, self).update() self.allow_edit = self.is_edit_allowed() - self.specification = self.context.getResultsRangeDict() + results_range = self.context.getResultsRange() + self.specification = dicts_to_dict(results_range, "keyword") self.dynamic_spec = self.context.getDynamicAnalysisSpec() @view.memoize @@ -347,7 +349,6 @@ def process_form(self, instance, field, form, empty_marker=None, values.append(subfield_values) - return values, {} def _get_spec_value(self, form, uid, key, check_floatable=True, diff --git a/bika/lims/content/abstractanalysis.py b/bika/lims/content/abstractanalysis.py index 279c9f0452..fac63cd391 100644 --- a/bika/lims/content/abstractanalysis.py +++ b/bika/lims/content/abstractanalysis.py @@ -40,6 +40,7 @@ from bika.lims.browser.fields import HistoryAwareReferenceField from bika.lims.browser.fields import InterimFieldsField from bika.lims.browser.fields import UIDReferenceField +from bika.lims.browser.fields import ResultRangeField from bika.lims.browser.fields.uidreferencefield import get_backreferences from bika.lims.browser.widgets import RecordsWidget from bika.lims.config import LDL @@ -148,6 +149,12 @@ ) ) +# Results Range that applies to this analysis +ResultsRange = ResultRangeField( + "ResultsRange", + required=0 +) + schema = schema.copy() + Schema(( AnalysisService, 
Analyst, @@ -160,7 +167,8 @@ RetestOf, Uncertainty, Calculation, - InterimFields + InterimFields, + ResultsRange, )) @@ -484,10 +492,6 @@ def setResult(self, value): # Set the result field self.getField("Result").set(self, val) - @security.public - def getResultsRange(self): - raise NotImplementedError("getResultsRange is not implemented.") - @security.public def calculateResult(self, override=False, cascade=False): """Calculates the result for the current analysis if it depends of diff --git a/bika/lims/content/abstractroutineanalysis.py b/bika/lims/content/abstractroutineanalysis.py index 0cb8aca0cb..41fbdd752e 100644 --- a/bika/lims/content/abstractroutineanalysis.py +++ b/bika/lims/content/abstractroutineanalysis.py @@ -21,6 +21,17 @@ from datetime import timedelta from AccessControl import ClassSecurityInfo +from Products.ATContentTypes.utils import DT2dt +from Products.ATContentTypes.utils import dt2DT +from Products.Archetypes.Field import BooleanField +from Products.Archetypes.Field import FixedPointField +from Products.Archetypes.Field import StringField +from Products.Archetypes.Schema import Schema +from Products.CMFCore.permissions import View +from zope.interface import alsoProvides +from zope.interface import implements +from zope.interface import noLongerProvides + from bika.lims import api from bika.lims import bikaMessageFactory as _ from bika.lims.browser.fields import UIDReferenceField @@ -28,23 +39,15 @@ from bika.lims.catalog.indexers.baseanalysis import sortable_title from bika.lims.content.abstractanalysis import AbstractAnalysis from bika.lims.content.abstractanalysis import schema -from bika.lims.content.analysisspec import ResultsRangeDict from bika.lims.content.clientawaremixin import ClientAwareMixin from bika.lims.content.reflexrule import doReflexRuleAction from bika.lims.interfaces import IAnalysis from bika.lims.interfaces import ICancellable from bika.lims.interfaces import IDynamicResultsRange +from bika.lims.interfaces import 
IInternalUse from bika.lims.interfaces import IRoutineAnalysis from bika.lims.interfaces.analysis import IRequestAnalysis from bika.lims.workflow import getTransitionDate -from Products.Archetypes.Field import BooleanField -from Products.Archetypes.Field import FixedPointField -from Products.Archetypes.Field import StringField -from Products.Archetypes.Schema import Schema -from Products.ATContentTypes.utils import DT2dt -from Products.ATContentTypes.utils import dt2DT -from Products.CMFCore.permissions import View -from zope.interface import implements # True if the analysis is created by a reflex rule @@ -335,9 +338,7 @@ def getAnalysisRequestPrintStatus(self): @security.public def getResultsRange(self): - """Returns the valid result range for this routine analysis based on the - results ranges defined in the Analysis Request this routine analysis is - assigned to. + """Returns the valid result range for this routine analysis A routine analysis will be considered out of range if it result falls out of the range defined in "min" and "max". 
If there are values set for @@ -347,21 +348,12 @@ def getResultsRange(self): :return: A dictionary with keys "min", "max", "warn_min" and "warn_max" :rtype: dict """ - specs = ResultsRangeDict() - analysis_request = self.getRequest() - if not analysis_request: - return specs - - keyword = self.getKeyword() - ar_ranges = analysis_request.getResultsRange() - # Get the result range that corresponds to this specific analysis - an_range = [rr for rr in ar_ranges if rr.get('keyword', '') == keyword] - rr = an_range and an_range[0] or specs + results_range = self.getField("ResultsRange").get(self) # dynamic results range adapter adapter = IDynamicResultsRange(self, None) if adapter: - rr.update(adapter()) - return rr + results_range.update(adapter()) + return results_range @security.public def getSiblings(self, retracted=False): @@ -484,6 +476,16 @@ def setHidden(self, hidden): self.setHiddenManually(True) self.getField('Hidden').set(self, hidden) + @security.public + def setInternalUse(self, internal_use): + """Applies the internal use of this Analysis. 
Analyses set for internal + use are not accessible to clients and are not visible in reports + """ + if internal_use: + alsoProvides(self, IInternalUse) + else: + noLongerProvides(self, IInternalUse) + @security.public def setReflexAnalysisOf(self, analysis): """Sets the analysis that has been reflexed in order to create this diff --git a/bika/lims/content/analysisrequest.py b/bika/lims/content/analysisrequest.py index 8e4c82ba23..3cf0f25a02 100644 --- a/bika/lims/content/analysisrequest.py +++ b/bika/lims/content/analysisrequest.py @@ -25,13 +25,42 @@ from urlparse import urljoin from AccessControl import ClassSecurityInfo +from DateTime import DateTime +from Products.ATExtensions.field import RecordsField +from Products.Archetypes.Widget import RichWidget +from Products.Archetypes.atapi import BaseFolder +from Products.Archetypes.atapi import BooleanField +from Products.Archetypes.atapi import BooleanWidget +from Products.Archetypes.atapi import ComputedField +from Products.Archetypes.atapi import ComputedWidget +from Products.Archetypes.atapi import FileField +from Products.Archetypes.atapi import FileWidget +from Products.Archetypes.atapi import FixedPointField +from Products.Archetypes.atapi import ReferenceField +from Products.Archetypes.atapi import StringField +from Products.Archetypes.atapi import StringWidget +from Products.Archetypes.atapi import TextField +from Products.Archetypes.atapi import registerType +from Products.Archetypes.public import Schema +from Products.Archetypes.references import HoldingReference +from Products.CMFCore.permissions import ModifyPortalContent +from Products.CMFCore.permissions import View +from Products.CMFCore.utils import getToolByName +from Products.CMFPlone.utils import _createObjectByType +from Products.CMFPlone.utils import safe_unicode +from zope.interface import alsoProvides +from zope.interface import implements +from zope.interface import noLongerProvides + from bika.lims import api from bika.lims import 
bikaMessageFactory as _ from bika.lims import deprecated from bika.lims import logger +from bika.lims.api.security import check_permission from bika.lims.browser.fields import ARAnalysesField from bika.lims.browser.fields import DateTimeField from bika.lims.browser.fields import DurationField +from bika.lims.browser.fields import ResultsRangesField from bika.lims.browser.fields import UIDReferenceField from bika.lims.browser.fields import EmailsField from bika.lims.browser.fields.remarksfield import RemarksField @@ -49,11 +78,11 @@ from bika.lims.catalog.bika_catalog import BIKA_CATALOG from bika.lims.config import PRIORITIES from bika.lims.config import PROJECTNAME -from bika.lims.content.analysisspec import ResultsRangeDict from bika.lims.content.bikaschema import BikaSchema from bika.lims.content.clientawaremixin import ClientAwareMixin from bika.lims.interfaces import IAnalysisRequest from bika.lims.interfaces import IAnalysisRequestPartition +from bika.lims.interfaces import IAnalysisRequestWithPartitions from bika.lims.interfaces import IBatch from bika.lims.interfaces import ICancellable from bika.lims.interfaces import IClient @@ -83,8 +112,8 @@ from bika.lims.permissions import FieldEditResultsInterpretation from bika.lims.permissions import FieldEditSampleCondition from bika.lims.permissions import FieldEditSamplePoint -from bika.lims.permissions import FieldEditSampler from bika.lims.permissions import FieldEditSampleType +from bika.lims.permissions import FieldEditSampler from bika.lims.permissions import FieldEditSamplingDate from bika.lims.permissions import FieldEditSamplingDeviation from bika.lims.permissions import FieldEditSamplingRound @@ -99,32 +128,6 @@ from bika.lims.utils import user_fullname from bika.lims.workflow import getTransitionDate from bika.lims.workflow import getTransitionUsers -from DateTime import DateTime -from Products.Archetypes.atapi import BaseFolder -from Products.Archetypes.atapi import BooleanField -from 
Products.Archetypes.atapi import BooleanWidget -from Products.Archetypes.atapi import ComputedField -from Products.Archetypes.atapi import ComputedWidget -from Products.Archetypes.atapi import FileField -from Products.Archetypes.atapi import FileWidget -from Products.Archetypes.atapi import FixedPointField -from Products.Archetypes.atapi import ReferenceField -from Products.Archetypes.atapi import StringField -from Products.Archetypes.atapi import StringWidget -from Products.Archetypes.atapi import TextField -from Products.Archetypes.atapi import registerType -from Products.Archetypes.public import Schema -from Products.Archetypes.references import HoldingReference -from Products.Archetypes.Widget import RichWidget -from Products.ATExtensions.field import RecordsField -from Products.CMFCore.permissions import ModifyPortalContent -from Products.CMFCore.permissions import View -from Products.CMFCore.utils import getToolByName -from Products.CMFPlone.utils import _createObjectByType -from Products.CMFPlone.utils import safe_unicode -from zope.interface import alsoProvides -from zope.interface import implements -from zope.interface import noLongerProvides IMG_SRC_RX = re.compile(r' Max'), - 'rangecomment': _('Range Comment'), - }, widget=AnalysisSpecificationWidget( - checkbox_bound=0, label=_("Specifications"), description=_( "'Min' and 'Max' values indicate a valid results range. Any " @@ -160,27 +128,6 @@ def contextual_title(self): else: return self.title + " (" + translate(_("Client")) + ")" - @security.public - def getResultsRangeDict(self): - """Return a dictionary with the specification fields for each - service. The keys of the dictionary are the keywords of each - analysis service. Each service contains a dictionary in which - each key is the name of the spec field: - specs['keyword'] = {'min': value, - 'max': value, - 'warnmin': value, - ... 
} - """ - specs = {} - subfields = self.Schema()['ResultsRange'].subfields - for spec in self.getResultsRange(): - keyword = spec['keyword'] - specs[keyword] = {} - for key in subfields: - if key not in ['uid', 'keyword']: - specs[keyword][key] = spec.get(key, '') - return specs - atapi.registerType(AnalysisSpec, PROJECTNAME) @@ -189,6 +136,7 @@ class ResultsRangeDict(dict): def __init__(self, *arg, **kw): super(ResultsRangeDict, self).__init__(*arg, **kw) + self["uid"] = self.uid self["min"] = self.min self["max"] = self.max self["error"] = self.error @@ -197,6 +145,12 @@ def __init__(self, *arg, **kw): self["min_operator"] = self.min_operator self["max_operator"] = self.max_operator + @property + def uid(self): + """The uid of the service this ResultsRange refers to + """ + return self.get("uid", '') + @property def min(self): return self.get("min", '') @@ -248,3 +202,22 @@ def min_operator(self, value): @max_operator.setter def max_operator(self, value): self['max_operator'] = value + + def __eq__(self, other): + if isinstance(other, dict): + other = ResultsRangeDict(other) + + if isinstance(other, ResultsRangeDict): + # Balance both dicts with same keys, but without corrupting them + current = dict(filter(lambda o: o[0] in other, self.items())) + other = dict(filter(lambda o: o[0] in current, other.items())) + + # Ensure that all values are str (sometimes ranges are stored as + # numeric values and sometimes are stored as str) + current = dict(map(lambda o: (o[0], str(o[1])), current.items())) + other = dict(map(lambda o: (o[0], str(o[1])), other.items())) + + # Check if both are equal + return current == other + + return super(ResultsRangeDict, self).__eq__(other) diff --git a/bika/lims/content/calculation.py b/bika/lims/content/calculation.py index 0ba888b09e..c61a4fcfaa 100644 --- a/bika/lims/content/calculation.py +++ b/bika/lims/content/calculation.py @@ -252,14 +252,39 @@ def getCalculationDependencies(self, flat=False, deps=None): if deps is None: deps = 
[] if flat is True else {} + def get_fetched(deps): + if isinstance(deps, list): + return map(api.get_uid, deps) + if isinstance(deps, dict): + fetched = deps.keys() + for value in deps.values(): + fetched.extend(get_fetched(value)) + return fetched + return [] + + # List of service uids that have been grabbed already. This is used to + # prevent an infinite recursion error when the formula includes the + # Keyword of the Service that includes the Calculation + fetched = get_fetched(deps) + for service in self.getDependentServices(): - calc = service.getCalculation() - if calc: - calc.getCalculationDependencies(flat, deps) + if api.get_uid(service) in fetched: + # Processed already. Omit to prevent recursion + continue + if flat: deps.append(service) else: deps[service.UID()] = {} + + calc = service.getCalculation() + if calc: + calc.getCalculationDependencies(flat, deps) + + if flat: + # Remove duplicates + deps = list(set(deps)) + return deps def getCalculationDependants(self, deps=None): diff --git a/bika/lims/content/referenceanalysis.py b/bika/lims/content/referenceanalysis.py index 2e41eed68e..a2443455da 100644 --- a/bika/lims/content/referenceanalysis.py +++ b/bika/lims/content/referenceanalysis.py @@ -19,18 +19,20 @@ # Some rights reserved, see README and LICENSE. 
from AccessControl import ClassSecurityInfo +from DateTime import DateTime +from Products.Archetypes.Field import StringField +from Products.Archetypes.public import Schema +from Products.Archetypes.public import registerType +from plone.app.blob.field import BlobField +from zope.interface import implements + +from bika.lims import api from bika.lims.config import PROJECTNAME from bika.lims.config import STD_TYPES from bika.lims.content.abstractanalysis import AbstractAnalysis from bika.lims.content.abstractanalysis import schema from bika.lims.content.analysisspec import ResultsRangeDict from bika.lims.interfaces import IReferenceAnalysis -from DateTime import DateTime -from plone.app.blob.field import BlobField -from Products.Archetypes.Field import StringField -from Products.Archetypes.public import Schema -from Products.Archetypes.public import registerType -from zope.interface import implements schema = schema.copy() + Schema(( StringField( @@ -120,7 +122,9 @@ def getResultsRange(self): :return: A dictionary with the keys min and max :rtype: dict """ - specs = ResultsRangeDict(result="") + specs = ResultsRangeDict(uid=api.get_uid(self), + keyword=self.getKeyword(), + result="") sample = self.getSample() if not sample: return specs diff --git a/bika/lims/exportimport/setupdata/__init__.py b/bika/lims/exportimport/setupdata/__init__.py index 75bb1717c2..853e6d8ab1 100644 --- a/bika/lims/exportimport/setupdata/__init__.py +++ b/bika/lims/exportimport/setupdata/__init__.py @@ -1718,7 +1718,6 @@ def Import(self): "keyword": service.getKeyword(), "min": row["min"] if row["min"] else "0", "max": row["max"] if row["max"] else "0", - "error": row["error"] if row["error"] else "0" }) # write objects. 
for parent in bucket.keys(): diff --git a/bika/lims/interfaces/__init__.py b/bika/lims/interfaces/__init__.py index df17f80691..027eb27478 100644 --- a/bika/lims/interfaces/__init__.py +++ b/bika/lims/interfaces/__init__.py @@ -113,6 +113,21 @@ class IAnalysisRequest(Interface): """ +class IHaveDescendants(Interface): + """Marker interface for objects that have Descendants + """ + + def getDescendants(self, all_descendants=False): + """Returns descendants of this object + :param all_descendants: if True, returns all descendants from hierarchy + """ + + +class IAnalysisRequestWithPartitions(IHaveDescendants): + """Marker interface for Analysis Requests that have Partitions + """ + + class IAnalysisRequestPartition(Interface): """Marker interface for Analysis Requests that are also Partitions """ diff --git a/bika/lims/subscribers/analysisrequest.py b/bika/lims/subscribers/analysisrequest.py index ffdad450ed..04933a24e1 100644 --- a/bika/lims/subscribers/analysisrequest.py +++ b/bika/lims/subscribers/analysisrequest.py @@ -41,12 +41,7 @@ def ObjectModifiedEventHandler(instance, event): # Mark/Unmark all analyses with IInternalUse to control their # visibility in results reports for analysis in instance.objectValues("Analysis"): - if internal_use: - alsoProvides(analysis, IInternalUse) - else: - noLongerProvides(analysis, IInternalUse) - - # Reindex analysis security in catalogs + analysis.setInternalUse(internal_use) analysis.reindexObjectSecurity() # If internal use is True, cascade same setting to partitions @@ -62,6 +57,10 @@ def AfterTransitionEventHandler(instance, event): This function does not superseds workflow.analysisrequest.events, rather it only updates the permissions in accordance with InternalUse value """ + # Permissions for a given object change after transitions to meet with the + # workflow definition. InternalUse prevents Clients to access to Samples + # and analyses as well. 
Therefore, we have to update the permissions + # manually here to override those set by default update_internal_use_permissions(instance) diff --git a/bika/lims/tests/doctests/API_analysis.rst b/bika/lims/tests/doctests/API_analysis.rst index aeb220f9a2..d699a012b9 100644 --- a/bika/lims/tests/doctests/API_analysis.rst +++ b/bika/lims/tests/doctests/API_analysis.rst @@ -512,6 +512,11 @@ Set open interval for min and max from water specification ... range['max_operator'] = 'lt' >>> specification.setResultsRange(ranges) +We need to re-apply the Specification for the changes to take effect: + + >>> ar.setSpecification(None) + >>> ar.setSpecification(specification) + First, get the analyses from slot 1 and sort them asc: >>> analyses = worksheet.get_analyses_at(1) @@ -540,6 +545,11 @@ Set left-open interval for min and max from water specification ... range['max_operator'] = 'lt' >>> specification.setResultsRange(ranges) +We need to re-apply the Specification for the changes to take effect: + + >>> ar.setSpecification(None) + >>> ar.setSpecification(specification) + First, get the analyses from slot 1 and sort them asc: >>> analyses = worksheet.get_analyses_at(1) @@ -568,6 +578,11 @@ Set right-open interval for min and max from water specification ... 
range['max_operator'] = 'leq' >>> specification.setResultsRange(ranges) +We need to re-apply the Specification for the changes to take effect: + + >>> ar.setSpecification(None) + >>> ar.setSpecification(specification) + First, get the analyses from slot 1 and sort them asc: >>> analyses = worksheet.get_analyses_at(1) diff --git a/bika/lims/tests/doctests/API_snapshot.rst b/bika/lims/tests/doctests/API_snapshot.rst index 2f76c63835..94a93b1c07 100644 --- a/bika/lims/tests/doctests/API_snapshot.rst +++ b/bika/lims/tests/doctests/API_snapshot.rst @@ -270,7 +270,7 @@ Comparing Snapshots The changes of two snapshots can be compared with `compare_snapshots`: - >>> snap0 = get_snapshot_by_version(sample, 0) + >>> snap0 = get_snapshot_by_version(sample, 2) Add 2 more analyses (Mg and Ca): diff --git a/bika/lims/tests/doctests/ARAnalysesField.rst b/bika/lims/tests/doctests/ARAnalysesField.rst index 10b19182e7..330e39cd0c 100644 --- a/bika/lims/tests/doctests/ARAnalysesField.rst +++ b/bika/lims/tests/doctests/ARAnalysesField.rst @@ -41,6 +41,13 @@ Functional Helpers: >>> def timestamp(format="%Y-%m-%d"): ... return DateTime().strftime(format) + >>> def get_analyses_from(sample, services): + ... if not isinstance(services, (list, tuple)): + ... services = [services] + ... uids = map(api.get_uid, services) + ... analyses = sample.getAnalyses(full_objects=True) + ... 
return filter(lambda an: an.getServiceUID() in uids, analyses) + Variables:: >>> date_now = timestamp() @@ -241,23 +248,16 @@ The field takes the following parameters: Pass in all prior created Analysis Services: >>> all_services = [analysisservice1, analysisservice2, analysisservice3] - >>> new_analyses = field.set(ar, all_services) + >>> field.set(ar, all_services) We expect to have now the `CA` and `MG` Analyses as well: - >>> sorted(new_analyses, key=methodcaller('getId')) - [, ] - -In the Analyis Request should be now three Analyses: - - >>> len(ar.objectValues("Analysis")) - 3 + >>> sorted(ar.objectValues("Analysis"), key=methodcaller('getId')) + [, , ] Removing Analyses is done by omitting those from the `items` list: - >>> new_analyses = field.set(ar, [analysisservice1]) - >>> sorted(new_analyses, key=methodcaller('getId')) - [] + >>> field.set(ar, [analysisservice1]) Now there should be again only one Analysis assigned: @@ -272,7 +272,7 @@ We expect to have just the `PH` Analysis again: The field can also handle UIDs of Analyses Services: >>> service_uids = map(api.get_uid, all_services) - >>> new_analyses = field.set(ar, service_uids) + >>> field.set(ar, service_uids) We expect again to have all the three Analyses: @@ -289,7 +289,7 @@ The field should also handle catalog brains: >>> api.get_title(brain) 'Calcium' - >>> new_analyses = field.set(ar, [brain]) + >>> field.set(ar, [brain]) We expect now to have just the `CA` analysis assigned: @@ -298,7 +298,7 @@ We expect now to have just the `CA` analysis assigned: Now let's try int mixed, one catalog brain and one object: - >>> new_analyses = field.set(ar, [analysisservice1, brain]) + >>> field.set(ar, [analysisservice1, brain]) We expect now to have now `PH` and `CA`: @@ -308,7 +308,7 @@ We expect now to have now `PH` and `CA`: Finally, we test it with an `Analysis` object: >>> analysis1 = ar["PH"] - >>> new_analyses = field.set(ar, [analysis1]) + >>> field.set(ar, [analysis1]) >>> 
sorted(ar.objectValues("Analysis"), key=methodcaller("getId")) [] @@ -330,7 +330,7 @@ It is a dictionary with the following keys and values: Each Analysis can request its own Specification (Result Range): - >>> new_analyses = field.set(ar, all_services) + >>> field.set(ar, all_services) >>> analysis1 = ar[analysisservice1.getKeyword()] >>> analysis2 = ar[analysisservice2.getKeyword()] @@ -350,7 +350,7 @@ Request and have precedence over the lab specifications: >>> arr = [arr1, arr2, arr3] >>> all_analyses = [analysis1, analysis2, analysis3] - >>> new_analyses = field.set(ar, all_analyses, specs=arr) + >>> field.set(ar, all_analyses, specs=arr) >>> myspec1 = analysis1.getResultsRange() >>> myspec1.get("rangecomment") @@ -364,10 +364,10 @@ Request and have precedence over the lab specifications: >>> myspec3.get("rangecomment") 'My CA Spec' -All Result Ranges are set on the AR: +Result Ranges are set to analyses level, but not present in the AR: >>> sorted(map(lambda r: r.get("rangecomment"), ar.getResultsRange())) - ['My CA Spec', 'My MG Spec', 'My PH Spec'] + [] Now we simulate the form input data of the ARs "Manage Analysis" form, so that the User only selected the `PH` service and gave some custom specifications for @@ -376,7 +376,7 @@ this Analysis. 
The specifications get applied if the keyword matches: >>> ph_specs = {"keyword": analysis1.getKeyword(), "min": 5.2, "max": 7.9, "error": 3} - >>> new_analyses = field.set(ar, [analysis1], specs=[ph_specs]) + >>> field.set(ar, [analysis1], specs=[ph_specs]) We expect to have now just one Analysis set: @@ -415,7 +415,7 @@ Prices are primarily defined on Analyses Services: Created Analyses inherit that price: - >>> new_analyses = field.set(ar, all_services) + >>> field.set(ar, all_services) >>> analysis1 = ar[analysisservice1.getKeyword()] >>> analysis2 = ar[analysisservice2.getKeyword()] @@ -440,7 +440,7 @@ The `setter` also allows to set custom prices for the Analyses: Now we set the field with all analyses services and new prices: - >>> new_analyses = field.set(ar, all_services, prices=prices) + >>> field.set(ar, all_services, prices=prices) The Analyses have now the new prices: @@ -491,7 +491,8 @@ Append interim field `B` to the `Total Hardness` Analysis Service: Now we assign the `Total Hardness` Analysis Service: - >>> new_analyses = field.set(ar, [analysisservice4]) + >>> field.set(ar, [analysisservice4]) + >>> new_analyses = get_analyses_from(ar, analysisservice4) >>> analysis = new_analyses[0] >>> analysis @@ -532,12 +533,7 @@ The Analysis Service returns the interim fields from the Calculation too: Update the AR with the new Analysis Service: - >>> new_analyses = field.set(ar, [analysisservice4]) - -Since no new Analyses were created, the field should return an empty list: - - >>> new_analyses - [] + >>> field.set(ar, [analysisservice4]) The Analysis should be still there: @@ -571,9 +567,8 @@ is removed from an Analysis Request. 
Assign the `PH` Analysis: - >>> new_analyses = field.set(ar, [analysisservice1]) - >>> new_analyses - [] + >>> field.set(ar, [analysisservice1]) + >>> new_analyses = ar.getAnalyses(full_objects=True) Create a new Worksheet and assign the Analysis to it: @@ -607,9 +602,7 @@ The worksheet contains now the Analysis: Removing the analysis from the AR also unassignes it from the worksheet: - >>> new_analyses = field.set(ar, [analysisservice2]) - >>> new_analyses - [] + >>> field.set(ar, [analysisservice2]) >>> ws.getAnalyses() [] @@ -635,7 +628,7 @@ Get the dependent services: We expect that dependent services get automatically set: - >>> new_analyses = field.set(ar, [analysisservice4]) + >>> field.set(ar, [analysisservice4]) >>> sorted(ar.objectValues("Analysis"), key=methodcaller('getId')) [, , ] @@ -681,7 +674,7 @@ We create a new attachment in the client and assign it to this specific analysis Now we remove the *PH* analysis. Since it is prohibited by the field to remove all analyses from an AR, we will set here some other analyses instead: - >>> new_analyses = field.set(ar2, [analysisservice2, analysisservice3]) + >>> field.set(ar2, [analysisservice2, analysisservice3]) The attachment should be deleted from the client folder as well: @@ -690,14 +683,14 @@ The attachment should be deleted from the client folder as well: Re-adding the *PH* analysis should start with no attachments: - >>> new_analyses = field.set(ar2, [analysisservice1, analysisservice2, analysisservice3]) + >>> field.set(ar2, [analysisservice1, analysisservice2, analysisservice3]) >>> an1 = ar2[analysisservice1.getKeyword()] >>> an1.getAttachment() [] This should work as well when multiple attachments are assigned. 
- >>> new_analyses = field.set(ar2, [analysisservice1, analysisservice2]) + >>> field.set(ar2, [analysisservice1, analysisservice2]) >>> an1 = ar2[analysisservice1.getKeyword()] >>> an2 = ar2[analysisservice2.getKeyword()] @@ -724,7 +717,7 @@ Assign the second half of the attachments to the *Magnesium* analysis: Removing the *PH* analysis should also remove all the assigned attachments: - >>> new_analyses = field.set(ar2, [analysisservice2]) + >>> field.set(ar2, [analysisservice2]) >>> att2.getId() in ar2.getClient().objectIds() False @@ -837,7 +830,7 @@ And all contained Analyses of the retest keep references to the same Attachments This means that removing that attachment from the retest should **not** delete the attachment from the original AR: - >>> new_analyses = field.set(ar_retest, [analysisservice1]) + >>> field.set(ar_retest, [analysisservice1]) >>> an.getAttachment() [] diff --git a/bika/lims/tests/doctests/ARAnalysesFieldWithPartitions.rst b/bika/lims/tests/doctests/ARAnalysesFieldWithPartitions.rst index c02f3c8b2e..fa546262d2 100644 --- a/bika/lims/tests/doctests/ARAnalysesFieldWithPartitions.rst +++ b/bika/lims/tests/doctests/ARAnalysesFieldWithPartitions.rst @@ -109,56 +109,51 @@ get_from_instance When asked for `Fe` when the primary is given, it returns the analysis, cause it lives in the primary: - >>> fe = field.get_from_instance(sample, Fe) + >>> fe = field.get_from_instance(sample, Fe)[0] >>> fe.getServiceUID() == api.get_uid(Fe) True -But when asked for `Cu` when the primary is given, it returns None, cause it +But when asked for `Cu` when the primary is given, it returns empty, cause it lives in the partition: - >>> cu = field.get_from_instance(sample, Cu) - >>> cu is None - True + >>> field.get_from_instance(sample, Cu) + [] While it returns the analysis when the partition is used: - >>> cu = field.get_from_instance(partition, Cu) + >>> cu = field.get_from_instance(partition, Cu)[0] >>> cu.getServiceUID() == api.get_uid(Cu) True -But when 
asking the partition for `Fe` it returns None, cause it lives in the +But when asking the partition for `Fe` it returns empty, cause it lives in the ancestor: - >>> fe = field.get_from_instance(partition, Fe) - >>> fe is None - True + >>> field.get_from_instance(partition, Fe) + [] get_from_ancestor ................. -When asked for `Fe` to primary, it returns None because there is no ancestor +When asked for `Fe` to primary, it returns empty because there is no ancestor containing `Fe`: - >>> fe = field.get_from_ancestor(sample, Fe) - >>> fe is None - True + >>> field.get_from_ancestor(sample, Fe) + [] But when asked for `Fe` to the partition, it returns the analysis, cause it it lives in an ancestor from the partition: - >>> fe = field.get_from_ancestor(partition, Fe) + >>> fe = field.get_from_ancestor(partition, Fe)[0] >>> fe.getServiceUID() == api.get_uid(Fe) True -If I ask for `Cu`, that lives in the partition, it will return None for both: +If I ask for `Cu`, that lives in the partition, it will return empty for both: - >>> cu = field.get_from_ancestor(sample, Cu) - >>> cu is None - True + >>> field.get_from_ancestor(sample, Cu) + [] - >>> cu = field.get_from_ancestor(partition, Cu) - >>> cu is None - True + >>> field.get_from_ancestor(partition, Cu) + [] get_from_descendant ................... 
@@ -166,28 +161,24 @@ get_from_descendant When asked for `Fe` to primary, it returns None because there is no descendant containing `Fe`: - >>> fe = field.get_from_descendant(sample, Fe) - >>> fe is None - True + >>> field.get_from_descendant(sample, Fe) + [] And same with partition: - >>> fe = field.get_from_descendant(partition, Fe) - >>> fe is None - True + >>> field.get_from_descendant(partition, Fe) + [] When asked for `Cu` to primary, it returns the analysis, because it lives in a descendant (partition): - >>> cu = field.get_from_descendant(sample, Cu) - >>> cu.getServiceUID() == api.get_uid(Cu) - True + >>> field.get_from_descendant(sample, Cu) + [] But returns None if I ask to the partition: - >>> cu = field.get_from_descendant(partition, Cu) - >>> cu is None - True + >>> field.get_from_descendant(partition, Cu) + [] get_analyses_from_descendants ............................. @@ -204,37 +195,29 @@ It returns the analyses contained by the descendants: Resolution of analyses from the Sample lineage ---------------------------------------------- -resolve_analysis +resolve_analyses ................ 
Resolves the analysis from the sample lineage if exists: - >>> fe = field.resolve_analysis(sample, Fe) - >>> fe.getServiceUID() == api.get_uid(Fe) - True - >>> fe.aq_parent == sample - True + >>> field.resolve_analyses(sample, Fe) + [] - >>> cu = field.resolve_analysis(sample, Cu) - >>> cu.getServiceUID() == api.get_uid(Cu) - True - >>> cu.aq_parent == partition - True + >>> field.resolve_analyses(sample, Cu) + [] - >>> au = field.resolve_analysis(sample, Au) - >>> au is None - True + >>> field.resolve_analyses(sample, Au) + [] But when we use the partition and the analysis is found in an ancestor, it moves the analysis into the partition: - >>> fe = field.resolve_analysis(partition, Fe) - >>> fe.getServiceUID() == api.get_uid(Fe) - True - >>> fe.aq_parent == partition - True + >>> field.resolve_analyses(partition, Fe) + [] + >>> sample.objectValues("Analysis") [] + >>> partition.objectValues("Analysis") [, ] @@ -245,30 +228,20 @@ Addition of analyses add_analysis ............ -Setup required parameters: - - >>> prices = hidden = dict() - If we try to add now an analysis that already exists, either in the partition or in the primary, the analysis won't be added: - >>> added = field.add_analysis(sample, Fe, prices, hidden) - >>> added is None - True + >>> field.add_analysis(sample, Fe) >>> sample.objectValues("Analysis") [] - >>> added = field.add_analysis(partition, Fe, prices, hidden) - >>> added is None - True + >>> field.add_analysis(partition, Fe) >>> partition.objectValues("Analysis") [, ] If we add a new analysis, this will be added in the sample we are working with: - >>> au = field.add_analysis(sample, Au, prices, hidden) - >>> au.getServiceUID() == api.get_uid(Au) - True + >>> field.add_analysis(sample, Au) >>> sample.objectValues("Analysis") [] >>> partition.objectValues("Analysis") @@ -281,9 +254,7 @@ Apply the changes: If I try to add an analysis that exists in an ancestor, the analysis gets moved while the function returns None: - >>> added = 
field.add_analysis(partition, Au, prices, hidden) - >>> added is None - True + >>> field.add_analysis(partition, Au) >>> sample.objectValues("Analysis") [] >>> partition.objectValues("Analysis") @@ -297,7 +268,6 @@ If we try to set same analyses as before to the root sample, nothing happens because the analyses are already there: >>> field.set(sample, [Cu, Fe, Au]) - [] The analyses still belong to the partition though: @@ -309,7 +279,6 @@ The analyses still belong to the partition though: Same result if I set the analyses to the partition: >>> field.set(partition, [Cu, Fe, Au]) - [] >>> sample.objectValues("Analysis") [] >>> partition.objectValues("Analysis") @@ -318,7 +287,6 @@ Same result if I set the analyses to the partition: If I add a new analysis in the list, the analysis is successfully added: >>> field.set(sample, [Cu, Fe, Au, Mg]) - [] >>> sample.objectValues("Analysis") [] @@ -331,13 +299,10 @@ Apply the changes: >>> transaction.commit() -If I set the same analyses to the partition, I don't get any result: +If I set the same analyses to the partition, the `Mg` analysis is moved into +the partition: >>> field.set(partition, [Cu, Fe, Au, Mg]) - [] - -but, the `Mg` analysis has been moved into the partition: - >>> sample.objectValues("Analysis") [] >>> partition.objectValues("Analysis") @@ -346,7 +311,6 @@ but, the `Mg` analysis has been moved into the partition: To remove `Mg` analysis, pass the list without `Mg`: >>> field.set(sample, [Cu, Fe, Au]) - [] The analysis `Mg` has been removed, although it belonged to the partition: @@ -359,10 +323,9 @@ But if I add a new analysis to the primary and I try to remove it from the partition, nothing will happen: >>> field.set(sample, [Cu, Fe, Au, Mg]) - [] >>> field.set(partition, [Cu, Fe, Au]) - [] + >>> sample.objectValues("Analysis") [] >>> partition.objectValues("Analysis") diff --git a/bika/lims/tests/doctests/RemoveAnalysesFromAnalysisRequest.rst 
b/bika/lims/tests/doctests/RemoveAnalysesFromAnalysisRequest.rst index a322063e75..c05794b4b0 100644 --- a/bika/lims/tests/doctests/RemoveAnalysesFromAnalysisRequest.rst +++ b/bika/lims/tests/doctests/RemoveAnalysesFromAnalysisRequest.rst @@ -80,7 +80,7 @@ Create a new Analysis Request: And remove two analyses (`Cu` and `Fe`): - >>> new_analyses = ar.setAnalyses([Au]) + >>> ar.setAnalyses([Au]) >>> map(lambda an: an.getKeyword(), ar.getAnalyses(full_objects=True)) ['Au'] @@ -144,7 +144,7 @@ Again, the Analysis Request status is still `sample_received`: But if we remove the analysis without result (`Cu`), the Analysis Request transitions to "to_be_verified" because follows `Fe`: - >>> new_analyses = ar.setAnalyses([Fe, Au]) + >>> ar.setAnalyses([Fe, Au]) >>> api.get_workflow_status_of(ar) 'to_be_verified' @@ -153,7 +153,7 @@ Therefore, if we try to remove the analysis `Fe` (in `to_be_verified` state), the Analysis Request will stay in `to_be_verified` and the Analysis will still be assigned: - >>> new_analyses = ar.setAnalyses([Au]) + >>> ar.setAnalyses([Au]) >>> analysis_fe in ar.objectValues() True @@ -173,7 +173,7 @@ The only way to remove the `Fe` analysis is to retract it first: And if we remove analysis `Fe`, the Analysis Request will follow `Au` analysis (that is `verified`): - >>> new_analyses = ar.setAnalyses([Au]) + >>> ar.setAnalyses([Au]) >>> api.get_workflow_status_of(ar) 'verified' @@ -228,6 +228,6 @@ The Analysis Request status is still `sample_received`: But if we remove the analysis without result (`Cu`), the Analysis Request transitions to "verfied" because follows `Fe` and `Au`: - >>> new_analyses = ar.setAnalyses([Fe, Au]) + >>> ar.setAnalyses([Fe, Au]) >>> api.get_workflow_status_of(ar) 'verified' diff --git a/bika/lims/tests/doctests/ServicesCalculationRecursion.rst b/bika/lims/tests/doctests/ServicesCalculationRecursion.rst new file mode 100644 index 0000000000..c55d5ce181 --- /dev/null +++ 
b/bika/lims/tests/doctests/ServicesCalculationRecursion.rst @@ -0,0 +1,101 @@ +Infinite recursion when fetching dependencies from Service +========================================================== + +This test checks that no infinite recursion error arises when fetching the +dependencies of a Service (via Calculation) that itself contains a keyword in +a calculation from another service bound to a calculation that refers to the +first one as well. + +Running this test from the buildout directory: + + bin/test test_textual_doctests -t ServicesCalculationRecursion.rst + +Test Setup +---------- + +Needed imports: + + >>> from plone.app.testing import setRoles + >>> from plone.app.testing import TEST_USER_ID + >>> from plone.app.testing import TEST_USER_PASSWORD + >>> from bika.lims import api + +Variables: + + >>> portal = self.portal + >>> request = self.request + >>> setup = api.get_setup() + +Create some basic objects for the test: + + >>> setRoles(portal, TEST_USER_ID, ['Manager',]) + >>> labcontact = api.create(setup.bika_labcontacts, "LabContact", Firstname="Lab", Lastname="Manager") + >>> department = api.create(setup.bika_departments, "Department", title="Chemistry", Manager=labcontact) + >>> category = api.create(setup.bika_analysiscategories, "AnalysisCategory", title="Metals", Department=department) + + +Creation of Service with a Calculation that refers to itself +------------------------------------------------------------ + +The most common case is when the Calculation is assigned to the same Analysis +that is referred in the Calculation's formula: + + >>> Ca = api.create(setup.bika_analysisservices, "AnalysisService", title="Calcium", Keyword="Ca", Price="20", Category=category.UID()) + >>> Mg = api.create(setup.bika_analysisservices, "AnalysisService", title="Magnesium", Keyword="Mg", Price="20", Category=category.UID()) + + >>> calc = api.create(setup.bika_calculations, "Calculation", title="Total Hardness") + >>> calc.setFormula("[Ca] + [Mg]") + >>> 
calc.getFormula() + '[Ca] + [Mg]' + + >>> Ca.setCalculation(calc) + >>> Ca.getCalculation() + + + >>> deps = Ca.getServiceDependencies() + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] + + >>> deps = calc.getCalculationDependencies() + >>> len(deps.keys()) + 2 + + >>> deps = calc.getCalculationDependencies(flat=True) + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] + +The other case is when the initial Service is referred indirectly, through a +calculation a dependency is bound to: + + >>> calc_mg = api.create(setup.bika_calculations, "Calculation", title="Test") + >>> calc_mg.setFormula("[Ca] + [Ca]") + >>> calc_mg.getFormula() + '[Ca] + [Ca]' + + >>> Mg.setCalculation(calc_mg) + >>> Mg.getCalculation() + + + >>> deps = Mg.getServiceDependencies() + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] + + >>> deps = calc_mg.getCalculationDependencies() + >>> len(deps.keys()) + 2 + + >>> deps = calc_mg.getCalculationDependencies(flat=True) + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] + + >>> deps = Ca.getServiceDependencies() + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] + + >>> deps = calc.getCalculationDependencies() + >>> len(deps.keys()) + 2 + + >>> deps = calc.getCalculationDependencies(flat=True) + >>> sorted(map(lambda d: d.getKeyword(), deps)) + ['Ca', 'Mg'] \ No newline at end of file diff --git a/bika/lims/tests/doctests/SpecificationAndResultsRanges.rst b/bika/lims/tests/doctests/SpecificationAndResultsRanges.rst new file mode 100644 index 0000000000..97c930c247 --- /dev/null +++ b/bika/lims/tests/doctests/SpecificationAndResultsRanges.rst @@ -0,0 +1,319 @@ +Specification and Results Ranges with Samples and analyses +========================================================== + +Specification is an object containing a list of results ranges, each one refers +to the min/max/min_warn/max_warn values to apply for a given analysis service. 
+User can assign a Specification to a Sample, so the results of it's Analyses +will be checked against the results ranges provided by the Specification. + +Running this test from the buildout directory: + + bin/test test_textual_doctests -t SpecificationAndResultsRanges.rst + +Test Setup +---------- + +Needed imports: + + >>> import transaction + >>> from DateTime import DateTime + >>> from plone.app.testing import setRoles + >>> from plone.app.testing import TEST_USER_ID + >>> from plone.app.testing import TEST_USER_PASSWORD + >>> from bika.lims import api + >>> from bika.lims.utils.analysisrequest import create_analysisrequest + >>> from bika.lims.utils.analysisrequest import create_partition + >>> from bika.lims.workflow import doActionFor as do_action_for + +Functional Helpers: + + >>> def new_sample(services, specification=None, results_ranges=None): + ... values = { + ... 'Client': client.UID(), + ... 'Contact': contact.UID(), + ... 'DateSampled': DateTime().strftime("%Y-%m-%d"), + ... 'SampleType': sampletype.UID(), + ... 'Analyses': map(api.get_uid, services), + ... 'Specification': specification or None } + ... + ... ar = create_analysisrequest(client, request, values, results_ranges=results_ranges) + ... transitioned = do_action_for(ar, "receive") + ... return ar + + >>> def get_analysis_from(sample, service): + ... service_uid = api.get_uid(service) + ... for analysis in sample.getAnalyses(full_objects=True): + ... if analysis.getServiceUID() == service_uid: + ... return analysis + ... return None + + >>> def get_results_range_from(obj, service): + ... field = obj.getField("ResultsRange") + ... return field.get(obj, search_by=api.get_uid(service)) + + >>> def set_results_range_for(obj, results_range): + ... rrs = obj.getResultsRange() + ... uid = results_range["uid"] + ... rrs = filter(lambda rr: rr["uid"] != uid, rrs) + ... rrs.append(results_range) + ... 
obj.setResultsRange(rrs) + + +Variables: + + >>> portal = self.portal + >>> request = self.request + >>> setup = api.get_setup() + +Create some basic objects for the test: + + >>> setRoles(portal, TEST_USER_ID, ['Manager',]) + >>> client = api.create(portal.clients, "Client", Name="Happy Hills", ClientID="HH", MemberDiscountApplies=True) + >>> contact = api.create(client, "Contact", Firstname="Rita", Lastname="Mohale") + >>> sampletype = api.create(setup.bika_sampletypes, "SampleType", title="Water", Prefix="W") + >>> labcontact = api.create(setup.bika_labcontacts, "LabContact", Firstname="Lab", Lastname="Manager") + >>> department = api.create(setup.bika_departments, "Department", title="Chemistry", Manager=labcontact) + >>> category = api.create(setup.bika_analysiscategories, "AnalysisCategory", title="Metals", Department=department) + >>> Au = api.create(setup.bika_analysisservices, "AnalysisService", title="Gold", Keyword="Au", Price="20", Category=category.UID()) + >>> Cu = api.create(setup.bika_analysisservices, "AnalysisService", title="Copper", Keyword="Cu", Price="15", Category=category.UID()) + >>> Fe = api.create(setup.bika_analysisservices, "AnalysisService", title="Iron", Keyword="Fe", Price="10", Category=category.UID()) + >>> Mg = api.create(setup.bika_analysisservices, "AnalysisService", title="Magnesium", Keyword="Mg", Price="20", Category=category.UID()) + >>> Zn = api.create(setup.bika_analysisservices, "AnalysisService", title="Zinc", Keyword="Zn", Price="10", Category=category.UID()) + +Create an Analysis Specification for `Water`: + + >>> sampletype_uid = api.get_uid(sampletype) + >>> rr1 = {"uid": api.get_uid(Au), "min": 10, "max": 20, "warn_min": 5, "warn_max": 25} + >>> rr2 = {"uid": api.get_uid(Cu), "min": 20, "max": 30, "warn_min": 15, "warn_max": 35} + >>> rr3 = {"uid": api.get_uid(Fe), "min": 30, "max": 40, "warn_min": 25, "warn_max": 45} + >>> rr4 = {"uid": api.get_uid(Mg), "min": 40, "max": 50, "warn_min": 35, "warn_max": 55} + >>> 
rr5 = {"uid": api.get_uid(Zn), "min": 50, "max": 60, "warn_min": 45, "warn_max": 65} + >>> rr = [rr1, rr2, rr3, rr4, rr5] + >>> specification = api.create(setup.bika_analysisspecs, "AnalysisSpec", title="Lab Water Spec", SampleType=sampletype_uid, ResultsRange=rr) + + +Creation of a Sample with Specification +--------------------------------------- + +A given Specification can be assigned to the Sample during the creation process. +The results ranges of the mentioned Specification will be stored in ResultsRange +field from the Sample and the analyses will acquire those results ranges +individually. + +Specification from Sample is history-aware, so even if the Specification object +is changed after its assignment to the Sample, the Results Ranges from either +the Sample and its Analyses will remain untouched. + +Create a Sample and receive: + + >>> services = [Au, Cu, Fe, Mg] + >>> sample = new_sample(services, specification=specification) + +The sample has the specification assigned: + + >>> sample.getSpecification() + + +And its results ranges match with the sample's `ResultsRange` field value: + + >>> specification.getResultsRange() == sample.getResultsRange() + True + +And the analyses the sample contains have the results ranges properly set: + + >>> au = get_analysis_from(sample, Au) + >>> au.getResultsRange() == get_results_range_from(specification, Au) + True + + >>> cu = get_analysis_from(sample, Cu) + >>> cu.getResultsRange() == get_results_range_from(specification, Cu) + True + + >>> fe = get_analysis_from(sample, Fe) + >>> fe.getResultsRange() == get_results_range_from(specification, Fe) + True + + >>> mg = get_analysis_from(sample, Mg) + >>> mg.getResultsRange() == get_results_range_from(specification, Mg) + True + +We can change a result range by using properties: + + >>> rr_au = au.getResultsRange() + >>> rr_au.min = 11 + >>> rr_au.max = 21 + >>> (rr_au.min, rr_au.max) + (11, 21) + +Or using it as a dict: + + >>> rr_au["min"] = 15 + >>> rr_au["max"] = 
25 + >>> (rr_au["min"], rr_au["max"]) + (15, 25) + +If we change this results range in the Specification object, this won't take any +effect to neither the Sample nor analyses: + + >>> set_results_range_for(specification, rr_au) + >>> specification.getResultsRange() == sample.getResultsRange() + False + + >>> au.getResultsRange() == get_results_range_from(specification, Au) + False + + >>> get_results_range_from(sample, Au) == au.getResultsRange() + True + + >>> rr_sample_au = au.getResultsRange() + >>> (rr_sample_au.min, rr_sample_au.max) + (10, 20) + +If we re-apply the Specification, nothing will change though, because its `uid` +is still the same: + + >>> sample.setSpecification(specification) + >>> specification.getResultsRange() == sample.getResultsRange() + False + +But the ResultsRange value from Sample is updated accordingly if we set the +specification to `None` first: + + >>> sample.setSpecification(None) + >>> sample.setSpecification(specification) + >>> specification.getResultsRange() == sample.getResultsRange() + True + +As well as the analyses the sample contains: + + >>> au.getResultsRange() == get_results_range_from(specification, Au) + True + + >>> rr_sample_au = au.getResultsRange() + >>> (rr_sample_au.min, rr_sample_au.max) + (15, 25) + +Removal of Analyses from a Sample with Specifications +----------------------------------------------------- + +User can remove analyses from the Sample. If the user removes one of the +analyses, the Specification assigned to the Sample will remain intact, as well +as Sample's Results Range: + + >>> sample.setAnalyses([Au, Cu, Fe]) + >>> analyses = sample.objectValues() + >>> sorted(analyses, key=lambda an: an.getKeyword()) + [, , ] + + >>> sample.getSpecification() + + + >>> specification.getResultsRange() == sample.getResultsRange() + True + + +Addition of Analyses to a Sample with Specifications +---------------------------------------------------- + +User can add new analyses to the Sample as well. 
If the Sample has a +Specification set and the specification had a results range registered for +such analysis, the result range for the new analysis will be set automatically: + + >>> sample.setAnalyses([Au, Cu, Fe, Zn]) + >>> sample.getSpecification() + + + >>> zn = get_analysis_from(sample, Zn) + >>> zn.getResultsRange() == get_results_range_from(specification, Zn) + True + +If we reset an Analysis with its own ResultsRange, different from the range +defined by the Specification, the system does not clear the Specification: + + >>> rr_zn = zn.getResultsRange() + >>> rr_zn.min = 55 + >>> sample.setAnalyses([Au, Cu, Fe, Zn], specs=[rr_zn]) + >>> sample.getSpecification() + + +and Sample's ResultsRange is kept unchanged: + + >>> sample_rr = sample.getResultsRange() + >>> len(sample_rr) + 5 + +with result range for `Zn` unchanged: + + >>> sample_rr_zn = sample.getResultsRange(search_by=api.get_uid(Zn)) + >>> sample_rr_zn.min + 50 + +But analysis' result range has indeed changed: + + >>> zn.getResultsRange().min + 55 + +If we re-apply the Specification, the result range for `Zn`, as well as for the +Sample, are reestablished: + + >>> sample.setSpecification(None) + >>> sample.setSpecification(specification) + >>> specification.getResultsRange() == sample.getResultsRange() + True + + >>> zn.getResultsRange() == get_results_range_from(specification, Zn) + True + + >>> zn.getResultsRange().min + 50 + + +Sample with Specifications and Partitions +----------------------------------------- + +When a sample has partitions, the Specification set to the root Sample is +populated to all its descendants: + + >>> partition = create_partition(sample, request, [zn]) + >>> partition + + + >>> zn = get_analysis_from(partition, Zn) + >>> zn + + +The partition keeps the Specification and ResultsRange by its own: + + >>> partition.getSpecification() + + + >>> partition.getResultsRange() == specification.getResultsRange() + True + +If we reset an Analysis with its own ResultsRange, 
different from the range +defined by the Specification, the system does not clear the Specification, +neither from the root sample nor the partition: + + >>> rr_zn = zn.getResultsRange() + >>> rr_zn.min = 56 + >>> partition.setAnalyses([Zn], specs=[rr_zn]) + + >>> sample.getSpecification() + + + >>> partition.getSpecification() + + +And Results Range from both Sample and partition are kept untouched: + + >>> sample.getSpecification() + + + >>> sample.getResultsRange() == specification.getResultsRange() + True + + >>> partition.getSpecification() + + + >>> partition.getResultsRange() == specification.getResultsRange() + True diff --git a/bika/lims/upgrade/v01_03_003.py b/bika/lims/upgrade/v01_03_003.py index ca14abc86e..8b9e32b21d 100644 --- a/bika/lims/upgrade/v01_03_003.py +++ b/bika/lims/upgrade/v01_03_003.py @@ -21,17 +21,21 @@ from collections import defaultdict from operator import itemgetter +import transaction from bika.lims import api from bika.lims import logger +from bika.lims.catalog import CATALOG_ANALYSIS_REQUEST_LISTING from bika.lims.catalog.bikasetup_catalog import SETUP_CATALOG from bika.lims.config import PROJECTNAME as product -from bika.lims.setuphandlers import add_dexterity_setup_items +from bika.lims.interfaces import IAnalysisRequestWithPartitions from bika.lims.interfaces import ISubmitted from bika.lims.interfaces import IVerified +from bika.lims.setuphandlers import add_dexterity_setup_items from bika.lims.setuphandlers import setup_form_controller_actions from bika.lims.upgrade import upgradestep from bika.lims.upgrade.utils import UpgradeUtils from Products.Archetypes.config import UID_CATALOG +from zope.interface import alsoProvides version = "1.3.3" # Remember version number in metadata.xml and setup.py profile = "profile-{0}:default".format(product) @@ -271,12 +275,23 @@ def upgrade(tool): # https://github.com/senaite/senaite.core/pull/1480 setup_form_controller_actions(portal) + # Mark primary samples with IAnalysisRequestPrimary + 
mark_samples_with_partitions(portal) + # Add the dynamic analysisspecs folder # https://github.com/senaite/senaite.core/pull/1492 setup.runImportStepFromProfile(profile, "typeinfo") setup.runImportStepFromProfile(profile, "controlpanel") add_dexterity_setup_items(portal) + # Reset the results ranges from Specification objects (to include uid) + # https://github.com/senaite/senaite.core/pull/1506 + reset_specifications_ranges(portal) + + # Update the ResultsRange field from Samples and their analyses as needed + # https://github.com/senaite/senaite.core/pull/1506 + update_samples_result_ranges(portal) + logger.info("{0} upgraded to version {1}".format(product, version)) return True @@ -542,3 +557,91 @@ def add_index(catalog_id, index_name, index_metatype): catalog.addIndex(index_name, index_metatype) logger.info("Indexing new index '{}' ...".format(index_name)) catalog.manage_reindexIndex(index_name) + + +def mark_samples_with_partitions(portal): + logger.info("Marking Samples with partitions ...") + query = dict(portal_type="AnalysisRequest", isRootAncestor=False) + brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING) + total = len(brains) + for num, brain in enumerate(brains): + if num and num % 100 == 0: + logger.info("Marking samples with partitions: {}/{}" + .format(num, total)) + transaction.commit() + part = api.get_object(brain) + parent = part.getParentAnalysisRequest() + if not parent: + logger.error("Partition w/o Parent: {}".format(api.get_id(part))) + + elif not IAnalysisRequestWithPartitions.providedBy(parent): + alsoProvides(parent, IAnalysisRequestWithPartitions) + + logger.info("Marking Samples with partitions [DONE]") + + +def reset_specifications_ranges(portal): + """Reset the result ranges to existing Specification objects. 
Prior + versions were not storing the service uid in the result range + """ + logger.info("Add uids to Specification ranges subfields ...") + specifications = portal.bika_setup.bika_analysisspecs + for specification in specifications.objectValues("AnalysisSpec"): + specification.setResultsRange(specification.getResultsRange()) + logger.info("Add uids to Specification ranges subfields [DONE]") + + +def update_samples_result_ranges(portal): + """Stores the result range field for those samples that have a + specification assigned. In prior versions, getResultsRange was relying + on Specification's ResultsRange + """ + query = dict(portal_type="AnalysisRequest") + brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING) + total = len(brains) + for num, brain in enumerate(brains): + if num and num % 1000 == 0: + logger.info("{}/{} samples processed ...".format(num, total)) + transaction.commit() + logger.info("Changes commited") + sample = api.get_object(brain) + + # Check if the ResultsRange field from sample contains values already + ar_range = sample.getResultsRange() + if ar_range: + # This sample has results range already set, probably assigned + # manually through Manage analyses + # Reassign the results range (for uid subfield resolution) + field = sample.getField("ResultsRange") + field.set(sample, ar_range) + + # Store the result range directly to their analyses + update_analyses_results_range(sample) + + # No need to go further + continue + + # Check if the Sample has Specification set + spec_uid = sample.getRawSpecification() + if not spec_uid: + # This sample does not have a specification set, skip + continue + + # Store the specification results range to the Sample + specification = sample.getSpecification() + result_range = specification.getResultsRange() + sample.getField("ResultsRange").set(sample, result_range) + + # Store the result range directly to their analyses + update_analyses_results_range(sample) + + +def 
update_analyses_results_range(sample): + field = sample.getField("ResultsRange") + for analysis in sample.objectValues("Analysis"): + service_uid = analysis.getRawAnalysisService() + analysis_rr = field.get(sample, search_by=service_uid) + if analysis_rr: + analysis = api.get_object(analysis) + analysis.setResultsRange(analysis_rr) + analysis.reindexObject() diff --git a/bika/lims/utils/analysis.py b/bika/lims/utils/analysis.py index f3aa92d3cb..84948b68b1 100644 --- a/bika/lims/utils/analysis.py +++ b/bika/lims/utils/analysis.py @@ -78,6 +78,7 @@ def copy_analysis_field_values(source, analysis, **kwargs): mutator = getattr(analysis, mutator_name) mutator(value) + def create_analysis(context, source, **kwargs): """Create a new Analysis. The source can be an Analysis Service or an existing Analysis, and all possible field values will be set to the @@ -85,7 +86,7 @@ def create_analysis(context, source, **kwargs): :param context: The analysis will be created inside this object. :param source: The schema of this object will be used to populate analysis. :param kwargs: The values of any keys which match schema fieldnames will - be inserted into the corrosponding fields in the new analysis. + be inserted into the corresponding fields in the new analysis. :returns: Analysis object that was created :rtype: Analysis """ diff --git a/bika/lims/utils/analysisrequest.py b/bika/lims/utils/analysisrequest.py index be727a96d6..b727622a55 100644 --- a/bika/lims/utils/analysisrequest.py +++ b/bika/lims/utils/analysisrequest.py @@ -18,12 +18,14 @@ # Copyright 2018-2019 by it's authors. # Some rights reserved, see README and LICENSE. 
+import six import itertools import os import tempfile from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from Products.Archetypes.config import UID_CATALOG from Products.CMFCore.utils import getToolByName from Products.CMFPlone.utils import _createObjectByType from Products.CMFPlone.utils import safe_unicode @@ -34,6 +36,7 @@ from bika.lims import api from bika.lims import bikaMessageFactory as _ from bika.lims import logger +from bika.lims.catalog import SETUP_CATALOG from bika.lims.idserver import renameAfterCreation from bika.lims.interfaces import IAnalysisRequest from bika.lims.interfaces import IAnalysisRequestRetest @@ -56,41 +59,36 @@ def create_analysisrequest(client, request, values, analyses=None, - partitions=None, specifications=None, prices=None): - """This is meant for general use and should do everything necessary to - create and initialise an AR and any other required auxilliary objects - (Sample, SamplePartition, Analysis...) - :param client: - The container (Client) in which the ARs will be created. - :param request: - The current Request object. - :param values: - a dict, where keys are AR|Sample schema field names. - :param analyses: - Analysis services list. If specified, augments the values in - values['Analyses']. May consist of service objects, UIDs, or Keywords. - :param partitions: - A list of dictionaries, if specific partitions are required. If not - specified, AR's sample is created with a single partition. - :param specifications: - These values augment those found in values['Specifications'] - :param prices: - Allow different prices to be set for analyses. 
If not set, prices + results_ranges=None, prices=None): + """Creates a new AnalysisRequest (a Sample) object + :param client: The container where the Sample will be created + :param request: The current Http Request object + :param values: A dict, with keys as AnalaysisRequest's schema field names + :param analyses: List of Services or Analyses (brains, objects, UIDs, + keywords). Extends the list from values["Analyses"] + :param results_ranges: List of Results Ranges. Extends the results ranges + from the Specification object defined in values["Specification"] + :param prices: Mapping of AnalysisService UID -> price. If not set, prices are read from the associated analysis service. """ # Don't pollute the dict param passed in values = dict(values.items()) - # Create the Analysis Request - ar = _createObjectByType('AnalysisRequest', client, tmpID()) + # Resolve the Service uids of analyses to be added in the Sample. Values + # passed-in might contain Profiles and also values that are not uids. Also, + # additional analyses can be passed-in through either values or services + service_uids = to_services_uids(values=values, services=analyses) + + # Remove the Analyses from values. 
We will add them manually + values.update({"Analyses": []}) - # Resolve the services uids and set the analyses for this Analysis Request - service_uids = get_services_uids(context=client, values=values, - analyses_serv=analyses) - ar.setAnalyses(service_uids, prices=prices, specs=specifications) - values.update({"Analyses": service_uids}) + # Create the Analysis Request and submit the form + ar = _createObjectByType('AnalysisRequest', client, tmpID()) ar.processForm(REQUEST=request, values=values) + # Set the analyses manually + ar.setAnalyses(service_uids, prices=prices, specs=results_ranges) + # Handle hidden analyses from template and profiles # https://github.com/senaite/senaite.core/issues/1437 # https://github.com/senaite/senaite.core/issues/1326 @@ -189,93 +187,78 @@ def get_hidden_service_uids(profile_or_template): return map(lambda setting: setting["uid"], hidden) -def get_services_uids(context=None, analyses_serv=None, values=None): +def to_services_uids(services=None, values=None): """ - This function returns a list of UIDs from analyses services from its - parameters. - :param analyses_serv: A list (or one object) of service-related info items. - see _resolve_items_to_service_uids() docstring. - :type analyses_serv: list + Returns a list of Analysis Services uids + :param services: A list of service items (uid, keyword, brain, obj, title) :param values: a dict, where keys are AR|Sample schema field names. 
- :type values: dict - :returns: a list of analyses services UIDs + :returns: a list of Analyses Services UIDs """ - if not analyses_serv: - analyses_serv = [] - if not values: - values = {} + def to_list(value): + if not value: + return [] + if isinstance(value, six.string_types): + return [value] + if isinstance(value, (list, tuple)): + return value + logger.warn("Cannot convert to a list: {}".format(value)) + return [] - if not context or (not analyses_serv and not values): - raise RuntimeError( - "get_services_uids: Missing or wrong parameters.") + services = services or [] + values = values or {} # Merge analyses from analyses_serv and values into one list - analyses_services = analyses_serv + (values.get("Analyses", None) or []) - - # It is possible to create analysis requests - # by JSON petitions and services, profiles or types aren't allways send. - # Sometimes we can get analyses and profiles that doesn't match and we - # should act in consequence. - # Getting the analyses profiles - analyses_profiles = values.get('Profiles', []) - if not isinstance(analyses_profiles, (list, tuple)): - # Plone converts the incoming form value to a list, if there are - # multiple values; but if not, it will send a string (a single UID). - analyses_profiles = [analyses_profiles] - - if not analyses_services and not analyses_profiles: - return [] + uids = to_list(services) + to_list(values.get("Analyses")) + + # Convert them to a list of service uids + uids = filter(None, map(to_service_uid, uids)) - # Add analysis services UIDs from profiles to analyses_services variable. 
- if analyses_profiles: - uid_catalog = getToolByName(context, 'uid_catalog') - for brain in uid_catalog(UID=analyses_profiles): + # Extend with service uids from profiles + profiles = to_list(values.get("Profiles")) + if profiles: + uid_catalog = api.get_tool(UID_CATALOG) + for brain in uid_catalog(UID=profiles): profile = api.get_object(brain) - # Only services UIDs - services_uids = profile.getRawService() - # _resolve_items_to_service_uids() will remove duplicates - analyses_services += services_uids - - return _resolve_items_to_service_uids(analyses_services) - - -def _resolve_items_to_service_uids(items): - """ Returns a list of service uids without duplicates based on the items - :param items: - A list (or one object) of service-related info items. The list can be - heterogeneous and each item can be: - - Analysis Service instance - - Analysis instance - - Analysis Service title - - Analysis Service UID - - Analysis Service Keyword - If an item that doesn't match any of the criterias above is found, the - function will raise a RuntimeError + uids.extend(profile.getRawService() or []) + + # Get the service uids without duplicates, but preserving the order + return list(dict.fromkeys(uids).keys()) + + +def to_service_uid(uid_brain_obj_str): + """Resolves the passed in element to a valid uid. 
Returns None if the value + cannot be resolved to a valid uid """ - def resolve_to_uid(item): - if api.is_uid(item): - return item - elif IAnalysisService.providedBy(item): - return item.UID() - elif IRoutineAnalysis.providedBy(item): - return item.getServiceUID() - - bsc = api.get_tool("bika_setup_catalog") - brains = bsc(portal_type='AnalysisService', getKeyword=item) - if brains: - return brains[0].UID - brains = bsc(portal_type='AnalysisService', title=item) - if brains: - return brains[0].UID - raise RuntimeError( - str(item) + " should be the UID, title, keyword " - " or title of an AnalysisService.") - - # Maybe only a single item was passed - if type(items) not in (list, tuple): - items = [items, ] - service_uids = map(resolve_to_uid, list(set(items))) - return list(set(service_uids)) + if api.is_uid(uid_brain_obj_str) and uid_brain_obj_str != "0": + return uid_brain_obj_str + + if api.is_object(uid_brain_obj_str): + obj = api.get_object(uid_brain_obj_str) + + if IAnalysisService.providedBy(obj): + return api.get_uid(obj) + + elif IRoutineAnalysis.providedBy(obj): + return obj.getServiceUID() + + else: + logger.error("Type not supported: {}".format(obj.portal_type)) + return None + + if isinstance(uid_brain_obj_str, six.string_types): + # Maybe is a keyword? 
+ query = dict(portal_type="AnalysisService", getKeyword=uid_brain_obj_str) + brains = api.search(query, SETUP_CATALOG) + if len(brains) == 1: + return api.get_uid(brains[0]) + + # Or maybe a title + query = dict(portal_type="AnalysisService", title=uid_brain_obj_str) + brains = api.search(query, SETUP_CATALOG) + if len(brains) == 1: + return api.get_uid(brains[0]) + + return None def notify_rejection(analysisrequest): @@ -432,7 +415,7 @@ def create_retest(ar): def create_partition(analysis_request, request, analyses, sample_type=None, container=None, preservation=None, skip_fields=None, - remove_primary_analyses=True, internal_use=True): + internal_use=True): """ Creates a partition for the analysis_request (primary) passed in :param analysis_request: uid/brain/object of IAnalysisRequest type @@ -442,7 +425,6 @@ def create_partition(analysis_request, request, analyses, sample_type=None, :param container: uid/brain/object of Container :param preservation: uid/brain/object of Preservation :param skip_fields: names of fields to be skipped on copy from primary - :param remove_primary_analyses: removes the analyses from the parent :return: the new partition """ partition_skip_fields = [ @@ -488,15 +470,14 @@ def create_partition(analysis_request, request, analyses, sample_type=None, client = ar.getClient() analyses = list(set(map(api.get_object, analyses))) services = map(lambda an: an.getAnalysisService(), analyses) - specs = ar.getSpecification() - specs = specs and specs.getResultsRange() or [] - partition = create_analysisrequest(client, request=request, values=record, - analyses=services, specifications=specs) - - # Remove analyses from the primary - if remove_primary_analyses: - analyses_ids = map(api.get_id, analyses) - ar.manage_delObjects(analyses_ids) + + # Populate the root's ResultsRanges to partitions + results_ranges = ar.getResultsRange() or [] + partition = create_analysisrequest(client, + request=request, + values=record, + analyses=services, + 
results_ranges=results_ranges) # Reindex Parent Analysis Request ar.reindexObject(idxs=["isRootAncestor"]) diff --git a/bika/lims/workflow/analysisrequest/events.py b/bika/lims/workflow/analysisrequest/events.py index 8edc785c19..2b570ac17c 100644 --- a/bika/lims/workflow/analysisrequest/events.py +++ b/bika/lims/workflow/analysisrequest/events.py @@ -202,3 +202,9 @@ def after_detach(analysis_request): # Reindex both the parent and the detached one analysis_request.reindexObject() parent.reindexObject() + + # And the analyses too. aranalysesfield relies on a search against the + # catalog to return the analyses: calling `getAnalyses` to the parent + # will return all them, so no need to do the same with the detached + analyses = parent.getAnalyses(full_objects=True) + map(lambda an: an.reindexObject(), analyses)
+ + + + + + @@ -47,6 +60,7 @@
+ + + + + + @@ -77,6 +102,7 @@ +