Mirror of https://github.com/arnaucube/poulpy.git, synced 2026-02-10 05:06:44 +01:00
Add offset to blind retrieval
@@ -17,21 +17,31 @@ impl GLWEBlindRetriever {
     where
         A: GLWEInfos,
     {
-        let log2_max_address: usize = (u32::BITS - (size as u32 - 1).leading_zeros()) as usize;
+        let bit_size: usize = (u32::BITS - (size as u32 - 1).leading_zeros()) as usize;
         Self {
-            accumulators: (0..log2_max_address)
+            accumulators: (0..bit_size)
                 .map(|_| Accumulator::alloc(infos))
                 .collect_vec(),
             counter: 0,
         }
     }

     pub fn retrieve_tmp_bytes<M, R, S, BE: Backend>(module: &M, res: &R, selector: &S) -> usize
     where
         M: Cmux<BE>,
         R: GLWEInfos,
         S: GGSWInfos,
     {
         module.cmux_tmp_bytes(res, res, selector)
     }

     pub fn retrieve<M, R, A, S, BE: Backend>(
         &mut self,
         module: &M,
         res: &mut R,
         data: &[A],
         selector: &S,
+        offset: usize,
         scratch: &mut Scratch<BE>,
     ) where
         M: GLWECopy + Cmux<BE>,

@@ -41,14 +51,13 @@ impl GLWEBlindRetriever {
         Scratch<BE>: ScratchTakeCore<BE>,
     {
         self.reset();

         for ct in data {
-            self.add(module, ct, selector, scratch);
+            self.add(module, ct, selector, offset, scratch);
         }
-        self.flush(module, res, selector, scratch);
+        self.flush(module, res, selector, offset, scratch);
     }

-    pub fn add<A, S, M, BE: Backend>(&mut self, module: &M, a: &A, selector: &S, scratch: &mut Scratch<BE>)
+    pub fn add<A, S, M, BE: Backend>(&mut self, module: &M, a: &A, selector: &S, offset: usize, scratch: &mut Scratch<BE>)
     where
         A: GLWEToRef,
         S: GetGGSWBit<BE>,

@@ -61,11 +70,19 @@ impl GLWEBlindRetriever {
             1 << self.accumulators.len()
         );

-        add_core(module, a, &mut self.accumulators, 0, selector, scratch);
+        add_core(
+            module,
+            a,
+            &mut self.accumulators,
+            0,
+            selector,
+            offset,
+            scratch,
+        );
         self.counter += 1;
     }

-    pub fn flush<R, M, S, BE: Backend>(&mut self, module: &M, res: &mut R, selector: &S, scratch: &mut Scratch<BE>)
+    pub fn flush<R, M, S, BE: Backend>(&mut self, module: &M, res: &mut R, selector: &S, offset: usize, scratch: &mut Scratch<BE>)
     where
         R: GLWEToMut,
         S: GetGGSWBit<BE>,

@@ -81,6 +98,7 @@ impl GLWEBlindRetriever {
                 acc_next,
                 i + 1,
                 selector,
+                offset,
                 scratch,
             );
             acc_prev[0].num = 0

@@ -94,6 +112,7 @@ impl GLWEBlindRetriever {
         for acc in self.accumulators.iter_mut() {
             acc.num = 0;
         }
         self.counter = 0;
     }
 }

@@ -120,6 +139,7 @@ fn add_core<A, S, M, BE: Backend>(
     accumulators: &mut [Accumulator],
     i: usize,
     selector: &S,
+    offset: usize,
     scratch: &mut Scratch<BE>,
 ) where
     A: GLWEToRef,

@@ -136,7 +156,12 @@ fn add_core<A, S, M, BE: Backend>(
             acc_prev[0].num = 1;
         }
         1 => {
-            module.cmux_inplace_neg(&mut acc_prev[0].data, a, &selector.get_bit(i), scratch);
+            module.cmux_inplace_neg(
+                &mut acc_prev[0].data,
+                a,
+                &selector.get_bit(i + offset),
+                scratch,
+            );

             if !acc_next.is_empty() {
                 add_core(

@@ -145,6 +170,7 @@ fn add_core<A, S, M, BE: Backend>(
                     acc_next,
                     i + 1,
                     selector,
+                    offset,
                     scratch,
                 );
             }
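For orientation, a minimal cleartext sketch (not part of this commit; the function and names below are illustrative only) of what the accumulator tree computes once `offset` is threaded through: level i of the tree consumes selector bit i + offset, so an index encrypted as `idx << offset` still selects `data[idx]`.

// Cleartext model of the binary cmux tree: level `i` is driven by bit `offset + i`
// of the plaintext selector value, mirroring add_core's get_bit(i + offset), but
// with plain integers instead of GLWE/GGSW ciphertexts.
fn blind_retrieve_model(data: &[u64], selector: u32, offset: usize) -> u64 {
    // Same formula as `bit_size` in the diff: ceil(log2(len)) tree levels.
    let levels = (u32::BITS - (data.len() as u32 - 1).leading_zeros()) as usize;
    let mut layer: Vec<u64> = data.to_vec();
    for i in 0..levels {
        let bit = (selector >> (offset + i)) & 1; // bit consumed at level i
        layer = layer
            .chunks(2)
            .map(|pair| {
                let a = pair[0];
                let b = *pair.get(1).unwrap_or(&pair[0]); // odd tail: duplicate
                if bit == 1 { b } else { a } // cmux: bit set picks `b`, else `a`
            })
            .collect();
    }
    layer[0]
}

fn main() {
    let data: Vec<u64> = (0..25).collect();
    let offset = 2;
    for idx in 0..data.len() as u32 {
        // The selector carries `idx` starting at bit `offset`, as in the test below.
        assert_eq!(blind_retrieve_model(&data, idx << offset, offset), data[idx as usize]);
    }
}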
@@ -11,14 +11,14 @@ use crate::tfhe::bdd_arithmetic::{Cmux, GetGGSWBit, UnsignedInteger};

 impl<T: UnsignedInteger, BE: Backend> GGSWBlindRotation<T, BE> for Module<BE>
 where
-    Self: GLWEBlindRotation<T, BE> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<BE>,
+    Self: GLWEBlindRotation<BE> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<BE>,
     Scratch<BE>: ScratchTakeCore<BE>,
 {
 }

 pub trait GGSWBlindRotation<T: UnsignedInteger, BE: Backend>
 where
-    Self: GLWEBlindRotation<T, BE> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<BE>,
+    Self: GLWEBlindRotation<BE> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<BE>,
 {
     fn ggsw_to_ggsw_blind_rotation_tmp_bytes<R, K>(&self, res_infos: &R, k_infos: &K) -> usize
     where

@@ -161,14 +161,14 @@ where
     }
 }

-impl<T: UnsignedInteger, BE: Backend> GLWEBlindRotation<T, BE> for Module<BE>
+impl<BE: Backend> GLWEBlindRotation<BE> for Module<BE>
 where
     Self: GLWECopy + GLWERotate<BE> + Cmux<BE>,
     Scratch<BE>: ScratchTakeCore<BE>,
 {
 }

-pub trait GLWEBlindRotation<T: UnsignedInteger, BE: Backend>
+pub trait GLWEBlindRotation<BE: Backend>
 where
     Self: GLWECopy + GLWERotate<BE> + Cmux<BE>,
 {

@@ -184,7 +184,7 @@ where
     fn glwe_blind_rotation_inplace<R, K>(
         &self,
         res: &mut R,
-        fhe_uint: &K,
+        value: &K,
         sign: bool,
         bit_rsh: usize,
         bit_mask: usize,

@@ -195,8 +195,6 @@ where
         K: GetGGSWBit<BE>,
         Scratch<BE>: ScratchTakeCore<BE>,
     {
-        assert!(bit_rsh + bit_mask <= T::BITS as usize);
-
         let mut res: GLWE<&mut [u8]> = res.to_mut();

         let (mut tmp_res, scratch_1) = scratch.take_glwe(&res);

@@ -219,7 +217,7 @@ where
             }

             // b <- (b - a) * GGSW(b[i]) + a
-            self.cmux_inplace(b, a, &fhe_uint.get_bit(i + bit_rsh), scratch_1);
+            self.cmux_inplace(b, a, &value.get_bit(i + bit_rsh), scratch_1);

             // ping-pong roles for next iter
             a_is_res = !a_is_res;
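The "b <- (b - a) * GGSW(b[i]) + a" comment above is the standard CMUX identity. A cleartext sketch, with plain integers standing in for the GLWE/GGSW ciphertexts (illustrative only, not the library API):

// cmux selects `b` when the bit is 1 and `a` when it is 0.
fn cmux(a: i64, b: i64, bit: i64) -> i64 {
    a + (b - a) * bit
}

fn main() {
    assert_eq!(cmux(10, 20, 0), 10); // bit = 0 keeps `a`
    assert_eq!(cmux(10, 20, 1), 20); // bit = 1 selects `b`

    // Blind rotation applies one cmux per selector bit, ping-ponging the two
    // buffers; composing the conditional steps reconstructs the selected amount.
    let value: i64 = 0b101;
    let mut acc: i64 = 0;
    for i in 0..3 {
        let bit = (value >> i) & 1;
        acc = cmux(acc, acc + (1 << i), bit);
    }
    assert_eq!(acc, value);
}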
@@ -471,16 +471,16 @@ where
         + VecZnxBigNormalize<BE>
         + VecZnxBigNormalizeTmpBytes,
 {
-    fn cmux_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
+    fn cmux_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, selector_infos: &B) -> usize
     where
         R: GLWEInfos,
         A: GLWEInfos,
         B: GGSWInfos,
     {
-        let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank() + 1).into(), b_infos.size());
+        let res_dft: usize = self.bytes_of_vec_znx_dft((selector_infos.rank() + 1).into(), selector_infos.size());
         res_dft
             + self
-                .glwe_external_product_internal_tmp_bytes(res_infos, a_infos, b_infos)
+                .glwe_external_product_internal_tmp_bytes(res_infos, a_infos, selector_infos)
                 .max(self.vec_znx_big_normalize_tmp_bytes())
     }

@@ -26,7 +26,7 @@ where
         + GLWESecretPreparedFactory<BE>
         + GGSWPreparedFactory<BE>
         + GGSWEncryptSk<BE>
-        + GLWEBlindRotation<u32, BE>
+        + GLWEBlindRotation<BE>
         + GLWEDecrypt<BE>
         + GLWEEncryptSk<BE>,
     ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,

@@ -178,22 +178,28 @@ where
             })
             .collect_vec();

-        let mut retriever: GLWEBlindRetriever = GLWEBlindRetriever::alloc(&glwe_infos, data.len());
         for idx in 0..data.len() as u32 {
+            let offset = 2;
             let mut idx_enc: FheUintPrepared<Vec<u8>, u32, BE> = FheUintPrepared::alloc_from_infos(module, &ggsw_infos);
             idx_enc.encrypt_sk(
                 module,
-                idx,
+                idx << offset,
                 sk,
                 &mut source_xa,
                 &mut source_xe,
                 scratch.borrow(),
             );

+            let mut retriever: GLWEBlindRetriever = GLWEBlindRetriever::alloc(&glwe_infos, 25);
             let mut res: FheUint<Vec<u8>, u32> = FheUint::alloc_from_infos(&glwe_infos);
-            retriever.retrieve(module, &mut res, &data_enc, &idx_enc, scratch.borrow());
-
-            println!("{}", res.decrypt(module, sk, scratch.borrow()));
+            retriever.retrieve(
+                module,
+                &mut res,
+                &data_enc,
+                &idx_enc,
+                offset,
+                scratch.borrow(),
+            );

             assert_eq!(
                 data[idx as usize],
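For reference, the renamed `bit_size` expression from the first hunk computes ceil(log2(size)), which is why the retriever allocated for 25 entries in this test holds 5 accumulators and consumes 5 selector bits starting at `offset`. A standalone check of that arithmetic (hypothetical helper, same formula as in the diff):

// ceil(log2(size)) for size >= 2, written exactly as in the diff.
fn bit_size(size: usize) -> usize {
    (u32::BITS - (size as u32 - 1).leading_zeros()) as usize
}

fn main() {
    assert_eq!(bit_size(16), 4); // 2^4 entries need 4 selector bits
    assert_eq!(bit_size(17), 5);
    assert_eq!(bit_size(25), 5); // the 25-entry table used in this test
    assert_eq!(bit_size(32), 5);
}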