use std::mem::MaybeUninit;

use num_traits::FromPrimitive;
use rayon::ThreadPool;
use rayon::prelude::*;

use crate::IdxSize;
use crate::total_ord::TotalOrd;

/// This is a perfect sort, particularly useful for an arg_sort of an arg_sort.
/// The second arg_sort sorts indices from `0` to `len`, so each index can simply be
/// assigned to its new location.
///
/// Besides that, we know that all indices are unique and thus do not alias, so we can parallelize.
///
/// This sort does not sort in place and will allocate.
///
/// - The right indices are used for sorting.
/// - The left indices are placed at the location the right index points to.
///
/// # Safety
/// The caller must ensure that the right indices of `&[(_, IdxSize)]` are integers ranging over `0..idx.len()`.
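///
/// # Example
///
/// A minimal sketch (not part of the original docs), assuming a rayon thread pool
/// is available and that the right indices form a permutation of `0..idx.len()`:
///
/// ```ignore
/// let pool = rayon::ThreadPoolBuilder::new().build().unwrap();
/// // Pairs of (value index, target position), e.g. from an arg_sort of an arg_sort.
/// let idx: &[(IdxSize, IdxSize)] = &[(0, 2), (1, 0), (2, 1)];
/// let mut out = Vec::new();
/// // SAFETY: the right indices are exactly a permutation of 0..idx.len().
/// unsafe { perfect_sort(&pool, idx, &mut out) };
/// assert_eq!(out, vec![1, 2, 0]);
/// ```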
#[cfg(any(target_os = "emscripten", not(target_family = "wasm")))]
pub unsafe fn perfect_sort(pool: &ThreadPool, idx: &[(IdxSize, IdxSize)], out: &mut Vec<IdxSize>) {
    let chunk_size = std::cmp::max(
        idx.len() / pool.current_num_threads(),
        pool.current_num_threads(),
    );

    out.reserve(idx.len());
    let ptr = out.as_mut_ptr() as *const IdxSize as usize;

    pool.install(|| {
        idx.par_chunks(chunk_size).for_each(|indices| {
            let ptr = ptr as *mut IdxSize;
            for (idx_val, idx_location) in indices {
                // SAFETY:
                // idx_location is in bounds by the invariant of this function
                // and we ensured we have at least `idx.len()` capacity.
                unsafe { *ptr.add(*idx_location as usize) = *idx_val };
            }
        });
    });
    // SAFETY:
    // All elements are written.
    unsafe { out.set_len(idx.len()) };
}

// Wasm alternative with a different signature.
#[cfg(all(not(target_os = "emscripten"), target_family = "wasm"))]
pub unsafe fn perfect_sort(
    pool: &crate::wasm::Pool,
    idx: &[(IdxSize, IdxSize)],
    out: &mut Vec<IdxSize>,
) {
    let chunk_size = std::cmp::max(
        idx.len() / pool.current_num_threads(),
        pool.current_num_threads(),
    );

    out.reserve(idx.len());
    let ptr = out.as_mut_ptr() as *const IdxSize as usize;

    pool.install(|| {
        idx.par_chunks(chunk_size).for_each(|indices| {
            let ptr = ptr as *mut IdxSize;
            for (idx_val, idx_location) in indices {
                // SAFETY:
                // idx_location is in bounds by the invariant of this function
                // and we ensured we have at least `idx.len()` capacity.
                unsafe { *ptr.add(*idx_location as usize) = *idx_val };
            }
        });
    });
    // SAFETY:
    // All elements are written.
    unsafe { out.set_len(idx.len()) };
}

/// # Safety
/// The caller must ensure that every element of `slice` has been initialized.
unsafe fn assume_init_mut<T>(slice: &mut [MaybeUninit<T>]) -> &mut [T] {
    unsafe { &mut *(slice as *mut [MaybeUninit<T>] as *mut [T]) }
}

/// Arg-sorts the `n` values yielded by `v` in ascending order, returning the sorted indices.
///
/// `scratch` is used as a reusable byte buffer; the returned index slice borrows from it.
pub fn arg_sort_ascending<'a, T: TotalOrd + Copy + 'a, Idx, I: IntoIterator<Item = T>>(
    v: I,
    scratch: &'a mut Vec<u8>,
    n: usize,
) -> &'a mut [Idx]
where
    Idx: FromPrimitive + Copy,
{
    // Needed to be able to write back to back in the same buffer.
    debug_assert_eq!(align_of::<T>(), align_of::<(T, Idx)>());
    let size = size_of::<(T, Idx)>();
    // Reserve one extra element of slack so that `align_to_mut` below can skip an
    // unaligned prefix and still leave room for `n` aligned elements.
    let upper_bound = size * n + size;
    scratch.reserve(upper_bound);
    let scratch_slice = unsafe {
        let cap_slice = scratch.spare_capacity_mut();
        let (_, scratch_slice, _) = cap_slice.align_to_mut::<MaybeUninit<(T, Idx)>>();
        &mut scratch_slice[..n]
    };

    for ((i, v), dst) in v.into_iter().enumerate().zip(scratch_slice.iter_mut()) {
        *dst = MaybeUninit::new((v, Idx::from_usize(i).unwrap()));
    }
    debug_assert_eq!(n, scratch_slice.len());

    let scratch_slice = unsafe { assume_init_mut(scratch_slice) };
    scratch_slice.sort_by(|key1, key2| key1.0.tot_cmp(&key2.0));

    // Now we write the indices into the same buffer,
    // compacting the `(T, Idx)` pairs down to bare `Idx` values.
    unsafe {
        let src = scratch_slice.as_ptr();

        let (_, scratch_slice_aligned_to_idx, _) = scratch_slice.align_to_mut::<Idx>();

        let dst = scratch_slice_aligned_to_idx.as_mut_ptr();

        for i in 0..n {
            dst.add(i).write((*src.add(i)).1);
        }

        &mut scratch_slice_aligned_to_idx[..n]
    }
}
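
// A minimal usage sketch (not part of the original source); it assumes `IdxSize`
// is this crate's index type and that `TotalOrd` is implemented for `f64`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arg_sort_ascending() {
        let values = [3.0f64, 1.0, 2.0];
        // `scratch` can be reused across calls to avoid repeated allocations.
        let mut scratch = Vec::new();
        let idxs: &mut [IdxSize] =
            arg_sort_ascending(values.iter().copied(), &mut scratch, values.len());
        // `idxs[k]` is the position in `values` of the k-th smallest value.
        assert_eq!(idxs.to_vec(), vec![1, 2, 0]);
    }
}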