Coverage for /opt/conda/envs/apienv/lib/python3.10/site-packages/daiquiri/core/components/tomo/image_resource.py: 48%
105 statements
coverage.py v7.6.5, created at 2024-11-15 02:12 +0000
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import annotations

import logging
from marshmallow import fields
from PIL import Image
import pint
import numpy

from daiquiri.core import marshal
from daiquiri.core.components import ComponentResource
from daiquiri.core.schema import ErrorSchema
from daiquiri.core.responses import image_response, ndarray_response, iff_response
from daiquiri.core.utils import worker
from daiquiri.core.utils import imageutils

from .datatype import SampleStageMetadata


logger = logging.getLogger(__name__)


class TomoImageResource(ComponentResource):
    @marshal(
        inp={
            "scanid": fields.Str(
                metadata={"description": "Identifier of the scan to get images from"}
            ),
            "detectorid": fields.Str(
                metadata={"description": "Name of the detector to get images from"}
            ),
            "process": fields.Str(
                metadata={
                    "description": "Name of the process retrieving the data, can be 'flat', 'dark', 'proj', 'flatfield'"
                }
            ),
            "node_name": fields.Str(
                metadata={"description": "The scan node name to get images from"}
            ),
            "image_no": fields.Int(
                metadata={"description": "The image number to load"}
            ),
45 "encoding": fields.String(
46 metadata={
47 "description": "Format to use for the resulting data: default is `img`"
48 }
49 ),
50 "profiles": fields.String(
51 metadata={
52 "description": "Image profiles which can be used from high to low priority"
53 }
54 ),
55 "norm": fields.String(
56 metadata={
57 "description": "Normalization of the image, can be 'linear', 'log', 'arcsinh', 'sqrt'"
58 }
59 ),
60 "autoscale": fields.String(
61 metadata={
62 "description": "Autoscale for the domain of the image, can be 'none', 'minmax', 'stddev3'"
63 }
64 ),
65 "vmin": fields.Float(
66 metadata={
67 "description": "Manual vmin level for manual scale normalization"
68 }
69 ),
70 "vmax": fields.Float(
71 metadata={
72 "description": "Manual vmax level for manual scale normalization"
73 }
74 ),
75 "lut": fields.String(
76 metadata={
77 "description": "LUT for the colors, can be 'gray', 'gray_r', 'viridis', 'cividis'"
78 }
79 ),
80 "histogram": fields.Bool(
81 metadata={"description": "Include or not an histogram in the response"}
82 ),
83 },
        out=[
            # [200, ScanDataSchema(), 'Scan data'],
            [404, ErrorSchema(), "No such image"],
            [404, ErrorSchema(), "No data projection"],
        ],
    )
    def get(
        self,
        detectorid: str,
        process: str,
        node_name: str | None = None,
        scanid: str | None = None,
        image_no: int | None = None,
        encoding: str | None = None,
        profiles: str | None = None,
        norm: str = "linear",
        autoscale: str = "none",
        vmin: float | None = None,
        vmax: float | None = None,
        lut: str = "gray",
        histogram: bool = False,
        **kwargs,
    ):
        """Get the image for a specific scan"""
        detector = self._parent.get_detector(detectorid)
        if detector is None:
            return {"error": "No such detector"}, 404

        if encoding is None:
            return {"error": "No encoding was specified"}, 400

        def get_data(detector, process):
            kind = None
            # FIXME: There is sanitization to do when proj/flat/dark sizes do not match
            if process == "dark":
                if detector.dark is None:
                    raise RuntimeError("No dark data")
                kind = process
                data = detector.dark.data
            elif process == "flat":
                if detector.flat is None:
                    raise RuntimeError("No flat data")
                kind = process
                data = detector.flat.data
            elif process == "proj":
                if detector.proj is None:
                    raise RuntimeError("No data projection")
                kind = process
                data = detector.proj.data
            elif process == "flatfield":
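                # Flat-field correction: depending on which reference images are
                # available, the result is proj (proj_norm), proj / flat
                # (proj_flat_norm), proj - dark (proj_dark_norm), or the full
                # (proj - dark) / (flat - dark) normalization (flatfield_norm).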
                d = detector
                if d.proj is None:
                    raise RuntimeError("No data projection")
                proj = d.proj.normalized
                if d.dark is None and d.flat is None:
                    kind = "proj_norm"
                    data = proj
                elif d.dark is None:
                    kind = "proj_flat_norm"
                    with numpy.errstate(divide="ignore", invalid="ignore"):
                        data = proj / d.flat.normalized
                        data[numpy.logical_not(numpy.isfinite(data))] = 0
                elif d.flat is None:
                    kind = "proj_dark_norm"
                    data = proj - d.dark.normalized
                else:
                    kind = "flatfield_norm"
                    with numpy.errstate(divide="ignore", invalid="ignore"):
                        data = (proj - d.dark.normalized) / (
                            d.flat.normalized - d.dark.normalized
                        )
                        data[numpy.logical_not(numpy.isfinite(data))] = 0
            else:
                raise RuntimeError(f"Process {process} unknown")
            return data, kind

        def generate():
            nonlocal detector, encoding, lut, autoscale, norm, process, profiles, vmin, vmax, histogram

            try:
                data, kind = get_data(detector, process)
            except RuntimeError as e:
                return {"error": e.args[0]}, 404

            assert data is not None  # nosec

            extra_headers = {}
            if detector.proj:
                extra_headers = self.format_extra_headers(
                    detector.proj.sample_stage_meta
                )
            extra_headers["DQR-datakind"] = kind

            if encoding == "png":
                # Supported for debugging purposes only
                d = imageutils.array_to_image(data, autoscale, norm, lut)
                im = Image.fromarray(d)
                if im.mode != "RGB":
                    im = im.convert("RGB")
                return image_response(im, img_format="PNG", extra_headers=extra_headers)
            elif encoding == "bin":
                return ndarray_response(data, extra_headers=extra_headers)
            elif encoding == "iff":
                if profiles is None:
                    profiles = "raw"
                try:
                    return iff_response(
                        data,
                        extra_headers=extra_headers,
                        extra_header_type=b"TOMO",
                        profiles=profiles,
                        autoscale=autoscale,
                        norm=norm,
                        vmin=vmin,
                        vmax=vmax,
                        histogram=histogram,
                    )
                except Exception:
                    logger.error(
                        "Error while creating the IFF response (autoscale:%s, norm:%s, vmin:%s, vmax:%s)",
                        autoscale,
                        norm,
                        vmin,
                        vmax,
                        exc_info=True,
                    )
                    return {"error": "Failed to format the response"}, 404
            else:
                return {"error": f"Unsupported encoding '{encoding}'"}, 404

        return worker(generate)

    def format_extra_headers(self, meta: SampleStageMetadata):
        extra_headers = {}
        if meta is None:
            return extra_headers

        def feed_quantity(key, quantity: pint.Quantity):
            if quantity is None:
                return
            # formatting with short unit (mm, deg...)
            extra_headers[key] = f"{quantity:~}"

        feed_quantity("DQR-sy", meta.sy)
        feed_quantity("DQR-sz", meta.sz)
        feed_quantity("DQR-sampy", meta.sampy)
        feed_quantity("DQR-somega", meta.somega)
        feed_quantity("DQR-detcy", meta.detcy)
        feed_quantity("DQR-detcz", meta.detcz)
        feed_quantity("DQR-pixelsize", meta.pixel_size)
        return extra_headers
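
The 'flatfield' branch of get_data() applies the standard flat-field correction (proj - dark) / (flat - dark). A minimal standalone sketch of that computation, using hypothetical toy arrays in place of the module's detector objects:

import numpy

proj = numpy.array([[10.0, 20.0], [30.0, 40.0]])    # raw projection frame (toy values)
dark = numpy.array([[1.0, 1.0], [1.0, 1.0]])        # dark (offset) reference
flat = numpy.array([[101.0, 101.0], [101.0, 1.0]])  # flat (beam) reference

with numpy.errstate(divide="ignore", invalid="ignore"):
    corrected = (proj - dark) / (flat - dark)

# Pixels where flat == dark divide by zero; force them to 0, as get_data() does
corrected[numpy.logical_not(numpy.isfinite(corrected))] = 0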