polars_core/series/implementations/decimal.rs

use polars_compute::rolling::QuantileMethod;

use super::*;
use crate::prelude::*;

unsafe impl IntoSeries for DecimalChunked {
    fn into_series(self) -> Series {
        Series(Arc::new(SeriesWrap(self)))
    }
}

impl private::PrivateSeriesNumeric for SeriesWrap<DecimalChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        Some(self.0.physical().to_bit_repr())
    }
}

impl SeriesWrap<DecimalChunked> {
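    /// Run `f` on the physical `Int128Chunked` and rewrap the result with this series'
    /// precision and scale.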
    fn apply_physical_to_s<F: Fn(&Int128Chunked) -> Int128Chunked>(&self, f: F) -> Series {
        f(self.0.physical())
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series()
    }

    fn apply_physical<T, F: Fn(&Int128Chunked) -> T>(&self, f: F) -> T {
        f(self.0.physical())
    }

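    /// `10^scale`: the factor between the physical `i128` representation and the logical
    /// decimal value.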
    fn scale_factor(&self) -> u128 {
        10u128.pow(self.0.scale() as u32)
    }

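    /// Rescale a `Float64` scalar computed on the physical values back to the logical
    /// decimal magnitude by dividing by the scale factor.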
    fn apply_scale(&self, mut scalar: Scalar) -> Scalar {
        if scalar.is_null() {
            return scalar;
        }

        debug_assert_eq!(scalar.dtype(), &DataType::Float64);
        let v = scalar
            .value()
            .try_extract::<f64>()
            .expect("should be f64 scalar");
        scalar.update((v / self.scale_factor() as f64).into());
        scalar
    }

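    /// Run a grouped aggregation on the physical `Int128Chunked` and restore the decimal
    /// dtype on the result: `Int128` output becomes `Decimal`, and `List(Int128)` output
    /// (from `agg_list`) becomes `List(Decimal)`.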
    fn agg_helper<F: Fn(&Int128Chunked) -> Series>(&self, f: F) -> Series {
        let agg_s = f(self.0.physical());
        match agg_s.dtype() {
            DataType::Int128 => {
                let ca = agg_s.i128().unwrap();
                let ca = ca.as_ref().clone();
                let precision = self.0.precision();
                let scale = self.0.scale();
                ca.into_decimal_unchecked(precision, scale).into_series()
            },
            DataType::List(dtype) if matches!(dtype.as_ref(), DataType::Int128) => {
                let dtype = self.0.dtype();
                let ca = agg_s.list().unwrap();
                let arr = ca.downcast_iter().next().unwrap();
                let precision = self.0.precision();
                let scale = self.0.scale();
                // SAFETY: dtype is passed correctly
                let s = unsafe {
                    Series::from_chunks_and_dtype_unchecked(
                        PlSmallStr::EMPTY,
                        vec![arr.values().clone()],
                        dtype,
                    )
                }
                .into_decimal(precision, scale)
                .unwrap();
                let new_values = s.array_ref(0).clone();
                let dtype = DataType::Int128;
                let arrow_dtype =
                    ListArray::<i64>::default_datatype(dtype.to_arrow(CompatLevel::newest()));
                let new_arr = ListArray::<i64>::new(
                    arrow_dtype,
                    arr.offsets().clone(),
                    new_values,
                    arr.validity().cloned(),
                );
                unsafe {
                    ListChunked::from_chunks_and_dtype_unchecked(
                        agg_s.name().clone(),
                        vec![Box::new(new_arr)],
                        DataType::List(Box::new(DataType::Decimal(precision, scale))),
                    )
                    .into_series()
                }
            },
            _ => unreachable!(),
        }
    }
}

impl private::PrivateSeries for SeriesWrap<DecimalChunked> {
    fn compute_len(&mut self) {
        self.0.physical_mut().compute_len()
    }

    fn _field(&self) -> Cow<'_, Field> {
        Cow::Owned(self.0.field())
    }

    fn _dtype(&self) -> &DataType {
        self.0.dtype()
    }
    fn _get_flags(&self) -> StatisticsFlags {
        self.0.physical().get_flags()
    }
    fn _set_flags(&mut self, flags: StatisticsFlags) {
        self.0.physical_mut().set_flags(flags)
    }

    #[cfg(feature = "zip_with")]
    fn zip_with_same_type(&self, mask: &BooleanChunked, other: &Series) -> PolarsResult<Series> {
        let other = other.decimal()?;

        Ok(self
            .0
            .physical()
            .zip_with(mask, other.physical())?
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series())
    }
    fn into_total_eq_inner<'a>(&'a self) -> Box<dyn TotalEqInner + 'a> {
        self.0.physical().into_total_eq_inner()
    }
    fn into_total_ord_inner<'a>(&'a self) -> Box<dyn TotalOrdInner + 'a> {
        self.0.physical().into_total_ord_inner()
    }

    fn vec_hash(
        &self,
        random_state: PlSeedableRandomStateQuality,
        buf: &mut Vec<u64>,
    ) -> PolarsResult<()> {
        self.0.physical().vec_hash(random_state, buf)?;
        Ok(())
    }

    fn vec_hash_combine(
        &self,
        build_hasher: PlSeedableRandomStateQuality,
        hashes: &mut [u64],
    ) -> PolarsResult<()> {
        self.0.physical().vec_hash_combine(build_hasher, hashes)?;
        Ok(())
    }

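    // Grouped sum/min/max/list operate on the physical values and keep the decimal dtype
    // via `agg_helper`.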
    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_sum(&self, groups: &GroupsType) -> Series {
        self.agg_helper(|ca| ca.agg_sum(groups))
    }

    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_min(&self, groups: &GroupsType) -> Series {
        self.agg_helper(|ca| ca.agg_min(groups))
    }

    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_max(&self, groups: &GroupsType) -> Series {
        self.agg_helper(|ca| ca.agg_max(groups))
    }

    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_list(&self, groups: &GroupsType) -> Series {
        self.agg_helper(|ca| ca.agg_list(groups))
    }

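    // Grouped variance and standard deviation are computed on a Float64 cast of the
    // logical values rather than on the physical i128s.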
    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_var(&self, groups: &GroupsType, ddof: u8) -> Series {
        self.0
            .cast(&DataType::Float64)
            .unwrap()
            .agg_var(groups, ddof)
    }

    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_std(&self, groups: &GroupsType, ddof: u8) -> Series {
        self.0
            .cast(&DataType::Float64)
            .unwrap()
            .agg_std(groups, ddof)
    }

    fn subtract(&self, rhs: &Series) -> PolarsResult<Series> {
        let rhs = rhs.decimal()?;
        ((&self.0) - rhs).map(|ca| ca.into_series())
    }
    fn add_to(&self, rhs: &Series) -> PolarsResult<Series> {
        let rhs = rhs.decimal()?;
        ((&self.0) + rhs).map(|ca| ca.into_series())
    }
    fn multiply(&self, rhs: &Series) -> PolarsResult<Series> {
        let rhs = rhs.decimal()?;
        ((&self.0) * rhs).map(|ca| ca.into_series())
    }
    fn divide(&self, rhs: &Series) -> PolarsResult<Series> {
        let rhs = rhs.decimal()?;
        ((&self.0) / rhs).map(|ca| ca.into_series())
    }
    #[cfg(feature = "algorithm_group_by")]
    fn group_tuples(&self, multithreaded: bool, sorted: bool) -> PolarsResult<GroupsType> {
        self.0.physical().group_tuples(multithreaded, sorted)
    }
    fn arg_sort_multiple(
        &self,
        by: &[Column],
        options: &SortMultipleOptions,
    ) -> PolarsResult<IdxCa> {
        self.0.physical().arg_sort_multiple(by, options)
    }
}

impl SeriesTrait for SeriesWrap<DecimalChunked> {
    fn rename(&mut self, name: PlSmallStr) {
        self.0.rename(name)
    }

    fn chunk_lengths(&self) -> ChunkLenIter<'_> {
        self.0.physical().chunk_lengths()
    }

    fn name(&self) -> &PlSmallStr {
        self.0.name()
    }

    fn chunks(&self) -> &Vec<ArrayRef> {
        self.0.physical().chunks()
    }
    unsafe fn chunks_mut(&mut self) -> &mut Vec<ArrayRef> {
        self.0.physical_mut().chunks_mut()
    }

    fn slice(&self, offset: i64, length: usize) -> Series {
        self.apply_physical_to_s(|ca| ca.slice(offset, length))
    }

    fn split_at(&self, offset: i64) -> (Series, Series) {
        let (a, b) = self.0.split_at(offset);
        (a.into_series(), b.into_series())
    }

    fn append(&mut self, other: &Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), append);
        let mut other = other.to_physical_repr().into_owned();
        self.0
            .physical_mut()
            .append_owned(std::mem::take(other._get_inner_mut().as_mut()))
    }
    fn append_owned(&mut self, mut other: Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), append);
        self.0.physical_mut().append_owned(std::mem::take(
            &mut other
                ._get_inner_mut()
                .as_any_mut()
                .downcast_mut::<DecimalChunked>()
                .unwrap()
                .phys,
        ))
    }

    fn extend(&mut self, other: &Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), extend);
        // `other` sits behind three layers of references (Cow -> SeriesTrait ->
        // ChunkedArray), so peel them with the chained `as_ref` calls below.
        let other = other.to_physical_repr();
        self.0
            .physical_mut()
            .extend(other.as_ref().as_ref().as_ref())?;
        Ok(())
    }

    fn filter(&self, filter: &BooleanChunked) -> PolarsResult<Series> {
        Ok(self
            .0
            .physical()
            .filter(filter)?
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series())
    }

    fn take(&self, indices: &IdxCa) -> PolarsResult<Series> {
        Ok(self
            .0
            .physical()
            .take(indices)?
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series())
    }

    unsafe fn take_unchecked(&self, indices: &IdxCa) -> Series {
        self.0
            .physical()
            .take_unchecked(indices)
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series()
    }

    fn take_slice(&self, indices: &[IdxSize]) -> PolarsResult<Series> {
        Ok(self
            .0
            .physical()
            .take(indices)?
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series())
    }

    unsafe fn take_slice_unchecked(&self, indices: &[IdxSize]) -> Series {
        self.0
            .physical()
            .take_unchecked(indices)
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series()
    }

    fn len(&self) -> usize {
        self.0.len()
    }

    fn rechunk(&self) -> Series {
        let ca = self.0.physical().rechunk().into_owned();
        ca.into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series()
    }

    fn new_from_index(&self, index: usize, length: usize) -> Series {
        self.0
            .physical()
            .new_from_index(index, length)
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series()
    }

    fn cast(&self, dtype: &DataType, cast_options: CastOptions) -> PolarsResult<Series> {
        self.0.cast_with_options(dtype, cast_options)
    }

    #[inline]
    unsafe fn get_unchecked(&self, index: usize) -> AnyValue<'_> {
        self.0.get_any_value_unchecked(index)
    }

    fn sort_with(&self, options: SortOptions) -> PolarsResult<Series> {
        Ok(self
            .0
            .physical()
            .sort_with(options)
            .into_decimal_unchecked(self.0.precision(), self.0.scale())
            .into_series())
    }

    fn arg_sort(&self, options: SortOptions) -> IdxCa {
        self.0.physical().arg_sort(options)
    }

    fn null_count(&self) -> usize {
        self.0.null_count()
    }

    fn has_nulls(&self) -> bool {
        self.0.has_nulls()
    }

    #[cfg(feature = "algorithm_group_by")]
    fn unique(&self) -> PolarsResult<Series> {
        Ok(self.apply_physical_to_s(|ca| ca.unique().unwrap()))
    }

    #[cfg(feature = "algorithm_group_by")]
    fn n_unique(&self) -> PolarsResult<usize> {
        self.0.physical().n_unique()
    }

    #[cfg(feature = "algorithm_group_by")]
    fn arg_unique(&self) -> PolarsResult<IdxCa> {
        self.0.physical().arg_unique()
    }

    fn is_null(&self) -> BooleanChunked {
        self.0.is_null()
    }

    fn is_not_null(&self) -> BooleanChunked {
        self.0.is_not_null()
    }

    fn reverse(&self) -> Series {
        self.apply_physical_to_s(|ca| ca.reverse())
    }

    fn shift(&self, periods: i64) -> Series {
        self.apply_physical_to_s(|ca| ca.shift(periods))
    }

    #[cfg(feature = "approx_unique")]
    fn approx_n_unique(&self) -> PolarsResult<IdxSize> {
        Ok(ChunkApproxNUnique::approx_n_unique(self.0.physical()))
    }

    fn clone_inner(&self) -> Arc<dyn SeriesTrait> {
        Arc::new(SeriesWrap(Clone::clone(&self.0)))
    }

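    // sum/min/max reductions stay in the decimal domain: the physical i128 result is
    // wrapped in an `AnyValue::Decimal` carrying the series' precision and scale.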
    fn sum_reduce(&self) -> PolarsResult<Scalar> {
        Ok(self.apply_physical(|ca| {
            let sum = ca.sum();
            let DataType::Decimal(prec, scale) = self.dtype() else {
                unreachable!()
            };
            let av = AnyValue::Decimal(sum.unwrap(), *prec, *scale);
            Scalar::new(self.dtype().clone(), av)
        }))
    }

    fn min_reduce(&self) -> PolarsResult<Scalar> {
        Ok(self.apply_physical(|ca| {
            let min = ca.min();
            let DataType::Decimal(prec, scale) = self.dtype() else {
                unreachable!()
            };
            let av = if let Some(min) = min {
                AnyValue::Decimal(min, *prec, *scale)
            } else {
                AnyValue::Null
            };
            Scalar::new(self.dtype().clone(), av)
        }))
    }

    fn max_reduce(&self) -> PolarsResult<Scalar> {
        Ok(self.apply_physical(|ca| {
            let max = ca.max();
            let DataType::Decimal(prec, scale) = self.dtype() else {
                unreachable!()
            };
            let av = if let Some(m) = max {
                AnyValue::Decimal(m, *prec, *scale)
            } else {
                AnyValue::Null
            };
            Scalar::new(self.dtype().clone(), av)
        }))
    }

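    // Float-valued statistics are computed on the physical i128 values and divided by
    // 10^scale to recover the logical decimal magnitude; std/var go through a Float64 cast.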
    fn _sum_as_f64(&self) -> f64 {
        self.0.physical()._sum_as_f64() / self.scale_factor() as f64
    }

    fn mean(&self) -> Option<f64> {
        self.0
            .physical()
            .mean()
            .map(|v| v / self.scale_factor() as f64)
    }

    fn median(&self) -> Option<f64> {
        self.0
            .physical()
            .median()
            .map(|v| v / self.scale_factor() as f64)
    }

    fn median_reduce(&self) -> PolarsResult<Scalar> {
        Ok(self.apply_scale(self.0.physical().median_reduce()))
    }

    fn std(&self, ddof: u8) -> Option<f64> {
        self.0.cast(&DataType::Float64).ok()?.std(ddof)
    }

    fn std_reduce(&self, ddof: u8) -> PolarsResult<Scalar> {
        self.0.cast(&DataType::Float64)?.std_reduce(ddof)
    }

    fn var(&self, ddof: u8) -> Option<f64> {
        self.0.cast(&DataType::Float64).ok()?.var(ddof)
    }

    fn var_reduce(&self, ddof: u8) -> PolarsResult<Scalar> {
        self.0.cast(&DataType::Float64)?.var_reduce(ddof)
    }

    fn quantile_reduce(&self, quantile: f64, method: QuantileMethod) -> PolarsResult<Scalar> {
        self.0
            .physical()
            .quantile_reduce(quantile, method)
            .map(|v| self.apply_scale(v))
    }

    fn find_validity_mismatch(&self, other: &Series, idxs: &mut Vec<IdxSize>) {
        self.0.physical().find_validity_mismatch(other, idxs)
    }

    fn as_any(&self) -> &dyn Any {
        &self.0
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        &mut self.0
    }

    fn as_phys_any(&self) -> &dyn Any {
        self.0.physical()
    }

    fn as_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
        self as _
    }
}