project: stringclasses (1 value)
name: stringlengths (7–39)
informal: stringlengths (127–4.37k)
formal: stringlengths (82–14.6k)
informal_source: stringlengths (36–53)
formal_source: stringlengths (21–46)
PFR
multiDist_of_perm
\begin{lemma}[Relabeling]\label{multidist-perm}\lean{multiDist_of_perm}\uses{multidist-def}\leanok If $\phi: \{1,\dots,m\} \to \{1,\dots,m\}$ is a bijection, then $D[X_{[m]}] = D[(X_{\phi(j)})_{1 \leq j \leq m}]$. \end{lemma} \begin{proof}\leanok Trivial. \end{proof}
/-- If `φ : {1, ..., m} → {1, ...,m}` is a bijection, then `D[X_[m]] = D[(X_φ(1), ..., X_φ(m))]`-/ lemma multiDist_of_perm {m : ℕ} {Ω : Fin m → Type*} (hΩ : ∀ i, MeasureSpace (Ω i)) (hΩprob: ∀ i, IsProbabilityMeasure (hΩ i).volume) (X : ∀ i, (Ω i) → G) (φ : Equiv.Perm (Fin m)) : D[fun i ↦ X (φ i); fun i ↦ hΩ (φ i)] = D[X ; hΩ] := by simp [multiDist] congr 1 · apply IdentDistrib.entropy_eq exact { aemeasurable_fst := by apply Measurable.aemeasurable apply Finset.measurable_sum intro i _ exact measurable_pi_apply i aemeasurable_snd := by apply Measurable.aemeasurable apply Finset.measurable_sum intro i _ exact measurable_pi_apply i map_eq := by let sum := fun x : Fin m → G ↦ ∑ i, x i let perm := MeasurableEquiv.piCongrLeft (fun _ ↦ G) φ have perm_apply : ∀ (i : Fin m) (x : Fin m → G), perm x i = x (φ.symm i) := by intro i x simp only [perm] rw [MeasurableEquiv.coe_piCongrLeft, Equiv.piCongrLeft_apply] simp only [eq_rec_constant] have invar : sum ∘ perm = sum := by ext x rw [comp_apply] convert Finset.sum_bijective φ.symm (Equiv.bijective φ.symm) ?_ ?_ · simp only [Finset.mem_univ, implies_true] intro i _ rw [perm_apply i x] calc _ = Measure.map (sum ∘ perm) (Measure.pi fun i ↦ Measure.map (X (φ i)) ℙ) := by rw [invar] _ = Measure.map sum (Measure.map perm (Measure.pi fun i ↦ Measure.map (X (φ i)) ℙ)) := by rw [Measure.map_map] · apply Finset.measurable_sum intro i _ exact measurable_pi_apply i apply measurable_pi_lambda intro i have : (fun x : Fin m → G ↦ perm x i) = (fun x : Fin m → G ↦ x (φ.symm i)) := by ext x exact perm_apply i x rw [this] exact measurable_pi_apply ((Equiv.symm φ) i) _ = _ := by congr exact (MeasureTheory.measurePreserving_piCongrLeft (fun i ↦ Measure.map (X i) ℙ) φ).map_eq } congr 1 convert Finset.sum_bijective φ (Equiv.bijective φ) ?_ ?_ · simp only [Finset.mem_univ, implies_true] simp only [Finset.mem_univ, imp_self, implies_true] -- The condition m ≥ 2 is likely not needed here. /-- Let `m ≥ 2`, and let `X_[m]` be a tuple of `G`-valued random variables. Then `∑ (1 ≤ j, k ≤ m, j ≠ k), d[X_j; -X_k] ≤ m(m-1) D[X_[m]].` -/
pfr/blueprint/src/chapter/torsion.tex:191
pfr/PFR/MoreRuzsaDist.lean:770
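A short expansion of why the relabeling is trivial, assuming the multidistance definition from \Cref{multidist-def}, $D[X_{[m]}] = \bbH[\sum_{i=1}^m \tilde X_i] - \frac{1}{m} \sum_{i=1}^m \bbH[X_i]$ with $\tilde X_i$ independent copies of the $X_i$: both terms are separately invariant under a bijection $\phi$ of the index set, \[ \bbH\Bigl[\sum_{i=1}^m \tilde X_{\phi(i)}\Bigr] = \bbH\Bigl[\sum_{i=1}^m \tilde X_i\Bigr], \qquad \frac{1}{m} \sum_{i=1}^m \bbH[X_{\phi(i)}] = \frac{1}{m} \sum_{i=1}^m \bbH[X_i], \] matching the two components handled separately in the Lean proof above (the entropy of the sum via `IdentDistrib.entropy_eq` and `measurePreserving_piCongrLeft`, the average of entropies via `Finset.sum_bijective`).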
PFR
multiRefPackage
\begin{definition}[$\eta$]\label{eta-def-multi}\lean{multiRefPackage}\leanok We set $\eta := \frac{1}{32m^3}$. \end{definition}
/-- A structure that packages all the fixed information in the main argument. See https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/Problem.20when.20instances.20are.20inside.20a.20structure for more discussion of the design choices here. -/ structure multiRefPackage (G Ω₀ : Type*) [MeasureableFinGroup G] [MeasureSpace Ω₀] where /-- The torsion index of the group we are considering. -/ (m : ℕ) (hm : m ≥ 2) (htorsion : ∀ x : G, m • x = 0) (hprob : IsProbabilityMeasure (ℙ : Measure Ω₀)) /-- The random variable -/ (X₀ : Ω₀ → G) (hmeas : Measurable X₀) /-- A small constant. The argument will only work for suitably small `η`. -/ (η : ℝ) (hη : 0 < η) (hη': η ≤ 1) /-- If $(X_i)_{1 \leq i \leq m}$ is a tuple, we define its $\tau$-functional $$ \tau[ (X_i)_{1 \leq i \leq m}] := D[(X_i)_{1 \leq i \leq m}] + \eta \sum_{i=1}^m d[X_i; X^0].$$ -/ noncomputable def multiTau {G Ω₀ : Type*} [MeasureableFinGroup G] [MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) (Ω : Fin p.m → Type*) (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, Ω i → G) : ℝ := D[X; hΩ] + p.η * ∑ i, d[ X i # p.X₀ ] -- I can't figure out how to make a τ notation due to the dependent types in the arguments. But perhaps we don't need one. Also it may be better to define multiTau in terms of probability measures on G, rather than G-valued random variables, again to avoid dependent type issues. -- had to force objects to lie in a fixed universe `u` here to avoid a compilation error
pfr/blueprint/src/chapter/torsion.tex:273
pfr/PFR/MultiTauFunctional.lean:25
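The Lean structure above keeps the constant abstract, recording only `hη : 0 < η` and `hη' : η ≤ 1`; as a sanity check, the concrete choice of \Cref{eta-def-multi} satisfies both hypotheses whenever $m \geq 2$ (as required by `hm`): \[ 0 < \eta = \frac{1}{32 m^3} \leq \frac{1}{32 \cdot 2^3} = \frac{1}{256} \leq 1. \]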
PFR
multiTau_continuous
\begin{proposition}[Existence of $\tau$-minimizer]\label{tau-min-exist-multi}\lean{multiTau_continuous, multiTau_min_exists}\leanok If $G$ is finite, then a $\tau$-minimizer exists. \end{proposition} \begin{proof}\uses{tau-def-multi} This is similar to the proof of \Cref{tau-min}. \end{proof}
/-- If $G$ is finite, then $\tau$ is continuous. -/ lemma multiTau_continuous {G Ω₀ : Type u} [MeasureableFinGroup G] [TopologicalSpace G] [DiscreteTopology G] [BorelSpace G] [MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) : Continuous (fun (μ : Fin p.m → ProbabilityMeasure G) ↦ multiTau p (fun _ ↦ G) (fun i ↦ ⟨ μ i ⟩) (fun _ ↦ id)) := by sorry
pfr/blueprint/src/chapter/torsion.tex:284
pfr/PFR/MultiTauFunctional.lean:52
PFR
multiTau_min_exists
\begin{proposition}[Existence of $\tau$-minimizer]\label{tau-min-exist-multi}\lean{multiTau_continuous, multiTau_min_exists}\leanok If $G$ is finite, then a $\tau$-minimizer exists. \end{proposition} \begin{proof}\uses{tau-def-multi} This is similar to the proof of \Cref{tau-min}. \end{proof}
/-- If $G$ is finite, then a $\tau$-minimizer exists. -/ lemma multiTau_min_exists {G Ω₀ : Type u} [MeasureableFinGroup G] [MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) : ∃ (Ω : Fin p.m → Type u) (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, Ω i → G), multiTauMinimizes p Ω hΩ X := by sorry
pfr/blueprint/src/chapter/torsion.tex:284
pfr/PFR/MultiTauFunctional.lean:56
PFR
multiTau_min_sum_le
\begin{proposition}[Minimizer close to reference variables]\label{tau-ref}\lean{multiTau_min_sum_le}\leanok If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, then $\sum_{i=1}^m d[X_i; X^0] \leq \frac{2m}{\eta} d[X^0; X^0]$. \end{proposition} \begin{proof}\uses{tau-min-multi, multidist-nonneg, multidist-ruzsa-III}\leanok By \Cref{tau-min-multi} we have $$ \tau[ (X_i)_{1 \leq i \leq m}] \leq \tau[ (X^0)_{1 \leq i \leq m}]$$ and hence by \Cref{tau-def-multi} and \Cref{multidist-nonneg} $$ \eta \sum_{i=1}^m d[X_i; X^0] \leq D[(X^0)_{1 \leq i \leq m}] + m d[X^0; X^0].$$ The claim now follows from \Cref{multidist-ruzsa-III}. \end{proof}
/-- If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, then $\sum_{i=1}^m d[X_i; X^0] \leq \frac{2m}{\eta} d[X^0; X^0]$. -/ lemma multiTau_min_sum_le {G Ω₀ : Type u} [hG: MeasureableFinGroup G] [hΩ₀: MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) (Ω : Fin p.m → Type u) (hΩ : ∀ i, MeasureSpace (Ω i)) (hprobΩ : ∀ i, IsProbabilityMeasure (ℙ : Measure (Ω i))) (X : ∀ i, Ω i → G) (hX : ∀ i, Measurable (X i)) (h_min : multiTauMinimizes p Ω hΩ X): ∑ i, d[X i # p.X₀] ≤ 2 * p.m * p.η⁻¹ * d[p.X₀ # p.X₀] := by have hη : p.η > 0 := p.hη have hm : p.m > 0 := by linarith [p.hm] have hprob := p.hprob calc _ = p.η⁻¹ * (0 + p.η * ∑ i, d[X i # p.X₀]) := by field_simp _ ≤ p.η⁻¹ * (D[X ; hΩ] + p.η * ∑ i, d[X i # p.X₀]) := by gcongr exact multiDist_nonneg hΩ hprobΩ X hX _ ≤ p.η⁻¹ * (D[fun _ ↦ p.X₀ ; fun _ ↦ hΩ₀] + p.η * (p.m * d[p.X₀ # p.X₀])) := by apply mul_le_mul_of_nonneg_left · have ineq := h_min (fun _ ↦ Ω₀) (fun _ ↦ hΩ₀) (fun _ ↦ p.X₀) simp [multiTau] at ineq exact ineq exact inv_nonneg_of_nonneg (le_of_lt hη) _ ≤ p.η⁻¹ * (p.m * d[p.X₀ # p.X₀] + 1 * (p.m * d[p.X₀ # p.X₀])) := by gcongr · have : NeZero p.m := ⟨hm.ne'⟩ apply multidist_ruzsa_III p.hm (fun _ ↦ hΩ₀) (fun _ ↦ p.X₀) _ 0 intro _ _ simp exact ProbabilityTheory.IdentDistrib.refl ( Measurable.aemeasurable p.hmeas) · have : 0 ≤ d[p.X₀ # p.X₀] := rdist_nonneg p.hmeas p.hmeas positivity exact p.hη' _ = _ := by field_simp ring /-- If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, and $k := D[(X_i)_{1 \leq i \leq m}]$, then for any other tuple $(X'_i)_{1 \leq i \leq m}$, one has $$ k - D[(X'_i)_{1 \leq i \leq m}] \leq \eta \sum_{i=1}^m d[X_i; X'_i].$$ -/
pfr/blueprint/src/chapter/torsion.tex:290
pfr/PFR/MultiTauFunctional.lean:59
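For reference, the inequality chain carried out by the `calc` block above, writing $X^0$ for `p.X₀`: by the minimizing property and the nonnegativity of multidistance, \[ \eta \sum_{i=1}^m d[X_i; X^0] \leq D[X_{[m]}] + \eta \sum_{i=1}^m d[X_i; X^0] \leq D[(X^0)_{1 \leq i \leq m}] + \eta\, m\, d[X^0; X^0], \] and by \Cref{multidist-ruzsa-III} together with $\eta \leq 1$ the right-hand side is at most \[ m\, d[X^0; X^0] + 1 \cdot m\, d[X^0; X^0] = 2m\, d[X^0; X^0], \] so dividing by $\eta > 0$ gives $\sum_{i=1}^m d[X_i; X^0] \leq \frac{2m}{\eta} d[X^0; X^0]$.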
PFR
multidist_eq_zero
\begin{proposition}[Vanishing]\label{multi-zero}\lean{multidist_eq_zero}\uses{multidist-def}\leanok If $D[X_{[m]}]=0$, then for each $1 \leq i \leq m$ there is a finite subgroup $H_i \leq G$ such that $d[X_i; U_{H_i}] = 0$. \end{proposition} \begin{proof}\uses{multidist-ruzsa-I, ruzsa-nonneg, lem:100pc} From \Cref{multidist-ruzsa-I} and \Cref{ruzsa-nonneg} we have $d[X_j; -X_k]=0$ for all distinct $1 \leq j,k \leq m$. The claim now follows from \Cref{lem:100pc}. \end{proof}
lemma multidist_eq_zero {m:ℕ} (hm: m ≥ 2) {Ω: Fin m → Type*} (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, (Ω i) → G) (hvanish : D[X; hΩ] = 0) : ∀ i, ∃ H : AddSubgroup G, ∃ U : (Ω i) → G, Measurable U ∧ IsUniform H U ∧ d[X i # U] = 0 := by sorry -- This is probably not the optimal spelling. For instance one could use the `μ "[|" t "]"` notation from Mathlib.ProbabilityTheory.ConditionalProbability to simplify the invocation of `ProbabilityTheory.cond` /-- If `X_[m] = (X_1, ..., X_m)` and `Y_[m] = (Y_1, ..., Y_m)` are tuples of random variables, with the `X_i` being `G`-valued (but the `Y_i` need not be), then we define `D[X_[m] | Y_[m]] = ∑_{(y_i)_{1 \leq i \leq m}} (∏ i, p_{Y_i}(y_i)) D[(X_i | Y_i = y_i)_{i=1}^m]` where each `y_i` ranges over the support of `p_{Y_i}` for `1 ≤ i ≤ m`. -/ noncomputable
pfr/blueprint/src/chapter/torsion.tex:260
pfr/PFR/MoreRuzsaDist.lean:853
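A one-line expansion of the first step of the proof: if $D[X_{[m]}] = 0$ then, by \Cref{multidist-ruzsa-I} and the nonnegativity of each summand (\Cref{ruzsa-nonneg}), \[ 0 \leq \sum_{1 \leq j,k \leq m:\ j \neq k} d[X_j; -X_k] \leq m(m-1)\, D[X_{[m]}] = 0, \] so $d[X_j; -X_k] = 0$ for every pair $j \neq k$ (and every index occurs in such a pair since $m \geq 2$); \Cref{lem:100pc} then supplies, for each $i$, a finite subgroup $H_i$ with $d[X_i; U_{H_i}] = 0$.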
PFR
multidist_ruzsa_I
\begin{lemma}[Multidistance and Ruzsa distance, I]\label{multidist-ruzsa-I}\lean{multidist_ruzsa_I}\uses{multidist-def}\leanok Let $m \ge 2$, and let $X_{[m]}$ be a tuple of $G$-valued random variables. Then $$\sum_{1 \leq j,k \leq m: j \neq k} d[X_j; -X_k] \leq m(m-1) D[X_{[m]}].$$ \end{lemma} \begin{proof}\uses{ruz-copy, sumset-lower, ruz-indep, multidist-indep, multidist-copy} By \Cref{ruz-copy}, \Cref{multidist-copy} we may take the $X_i$ to be jointly independent. From \Cref{sumset-lower}, we see that for any distinct $1 \leq j,k \leq m$, we have \[ \bbH[X_j+X_k] \leq \bbH[\sum_{i=1}^m X_i], \] and hence by \Cref{ruz-indep} \[ d[X_j;-X_k] \leq \bbH[\sum_{i=1}^m X_i] - \frac{1}{2} \bbH[X_j] - \frac{1}{2} \bbH[X_k]. \] Summing this over all pairs $(j,k)$, $j \neq k$ and using Lemma \ref{multidist-indep}, we obtain the claim. \end{proof}
lemma multidist_ruzsa_I {m:ℕ} (hm: m ≥ 2) {Ω: Fin m → Type*} (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, (Ω i) → G): ∑ j, ∑ k, (if j = k then (0:ℝ) else d[X j # X k]) ≤ m * (m-1) * D[X; hΩ] := by sorry /-- Let `m ≥ 2`, and let `X_[m]` be a tuple of `G`-valued random variables. Then `∑ j, d[X_j;X_j] ≤ 2 m D[X_[m]]`. -/
pfr/blueprint/src/chapter/torsion.tex:197
pfr/PFR/MoreRuzsaDist.lean:832
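The summation at the end of the proof, spelled out: assuming the independent form of the multidistance $D[X_{[m]}] = \bbH[\sum_{i=1}^m X_i] - \frac{1}{m} \sum_{i=1}^m \bbH[X_i]$ from \Cref{multidist-indep}, summing \[ d[X_j; -X_k] \leq \bbH\Bigl[\sum_{i=1}^m X_i\Bigr] - \tfrac{1}{2} \bbH[X_j] - \tfrac{1}{2} \bbH[X_k] \] over the $m(m-1)$ ordered pairs $j \neq k$, each index appears $m-1$ times in the $j$ slot and $m-1$ times in the $k$ slot, so the right-hand sides add up to \[ m(m-1)\, \bbH\Bigl[\sum_{i=1}^m X_i\Bigr] - (m-1) \sum_{i=1}^m \bbH[X_i] = m(m-1)\, D[X_{[m]}]. \]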
PFR
multidist_ruzsa_II
\begin{lemma}[Multidistance and Ruzsa distance, II]\label{multidist-ruzsa-II}\lean{multidist_ruzsa_II}\uses{multidist-def}\leanok Let $m \ge 2$, and let $X_{[m]}$ be a tuple of $G$-valued random variables. Then $$\sum_{j=1}^m d[X_j;X_j] \leq 2 m D[X_{[m]}].$$ \end{lemma} \begin{proof}\uses{ruzsa-triangle,multidist-ruzsa-I} From \Cref{ruzsa-triangle} we have $\dist{X_j}{X_j} \leq 2 \dist{X_j}{-X_k}$, and applying this to every summand in \Cref{multidist-ruzsa-I}, we obtain the claim. \end{proof}
lemma multidist_ruzsa_II {m:ℕ} (hm: m ≥ 2) {Ω: Fin m → Type*} (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, (Ω i) → G): ∑ j, d[X j # X j] ≤ 2 * m * D[X; hΩ] := by sorry /-- Let `I` be an indexing set of size `m ≥ 2`, and let `X_[m]` be a tuple of `G`-valued random variables. If the `X_i` all have the same distribution, then `D[X_[m]] ≤ m d[X_i;X_i]` for any `1 ≤ i ≤ m`. -/
pfr/blueprint/src/chapter/torsion.tex:214
pfr/PFR/MoreRuzsaDist.lean:837
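The averaging implicit in the proof: for any $k \neq j$, \Cref{ruzsa-triangle} and the symmetry of Ruzsa distance give $d[X_j; X_j] \leq 2\, d[X_j; -X_k]$, so averaging over the $m-1$ admissible $k$ for each $j$ and applying \Cref{multidist-ruzsa-I}, \[ \sum_{j=1}^m d[X_j; X_j] = \frac{1}{m-1} \sum_{j \neq k} d[X_j; X_j] \leq \frac{2}{m-1} \sum_{j \neq k} d[X_j; -X_k] \leq \frac{2}{m-1}\, m(m-1)\, D[X_{[m]}] = 2m\, D[X_{[m]}]. \]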
PFR
multidist_ruzsa_III
\begin{lemma}[Multidistance and Ruzsa distance, III]\label{multidist-ruzsa-III}\lean{multidist_ruzsa_III}\uses{multidist-def}\leanok Let $m \ge 2$, and let $X_{[m]}$ be a tuple of $G$-valued random variables. If the $X_i$ all have the same distribution, then $D[X_{[m]}] \leq m d[X_i;X_i]$ for any $1 \leq i \leq m$. \end{lemma} \begin{proof}\uses{klm-1, neg-ent, ruz-indep, sumset-lower, ruz-copy, multidist-copy} By \Cref{ruz-copy}, \Cref{multidist-copy} we may take the $X_i$ to be jointly independent. Let $X_0$ be a further independent copy of the $X_i$. From \Cref{klm-1}, we have $$ \bbH[-X_0 + \sum_{i=1}^m X_i] - \bbH[-X_0] \leq \sum_{i=1}^m \bbH[X_0 - X_i] - \bbH[-X_0]$$ and hence by \Cref{neg-ent} and \Cref{ruz-indep} $$ \bbH[-X_0 + \sum_{i=1}^m X_i] - \bbH[X_0] \leq m d[X_i,X_i].$$ On the other hand, by \Cref{sumset-lower} we have $$ \bbH[\sum_{i=1}^m X_i] \leq \bbH[-X_0 + \sum_{i=1}^m X_i]$$ and the claim follows. \end{proof}
lemma multidist_ruzsa_III {m:ℕ} (hm: m ≥ 2) {Ω: Fin m → Type*} (hΩ : ∀ i, MeasureSpace (Ω i)) (X : ∀ i, (Ω i) → G) (hidenT : ∀ j k, IdentDistrib (X j) (X k)): ∀ i, D[X; hΩ] ≤ m * d[X i # X i] := by sorry /-- Let `m ≥ 2`, and let `X_[m]` be a tuple of `G`-valued random variables. Let `W := ∑ X_i`. Then `d[W;-W] ≤ 2 D[X_i]`. -/
pfr/blueprint/src/chapter/torsion.tex:223
pfr/PFR/MoreRuzsaDist.lean:843
PFR
multidist_ruzsa_IV
\begin{lemma}[Multidistance and Ruzsa distance, IV]\label{multidist-ruzsa-IV}\lean{multidist_ruzsa_IV}\uses{multidist-def}\leanok Let $m \ge 2$, and let $X_{[m]}$ be a tuple of independent $G$-valued random variables. Let $W := \sum_{i=1}^m X_i$. Then $$ d[W;-W] \leq 2 D[X_i].$$ \end{lemma} \begin{proof}\uses{independent-exist, kv, sumset-lower, ruz-indep} Take $(X'_i)_{1 \leq i \leq m}$ to be further independent copies of $(X_i)_{1 \leq i \leq m}$ (which exist by \Cref{independent-exist}), and write $W' := \sum_{i=1}^m X'_i$. Fix any distinct $a,b \in I$. From \Cref{kv} one has \begin{equation}\label{7922} \bbH[W + W'] \leq \bbH[W] + \bbH[X_{a} + W'] - \bbH[X_{a}] \end{equation} and also \[ \bbH[X_a + W'] \leq \bbH[X_a + X_b] + \bbH[W'] - \bbH[X'_b].\] Combining this with~\eqref{7922} and then applying \Cref{sumset-lower} we have \begin{align*} \bbH[W + W'] & \leq 2\bbH[W] + \bbH[X_a + X_b] - \bbH[X_a] - \bbH[X_b] \\ & \leq 3 \bbH[W] - \bbH[X_a] - \bbH[X_b]. \end{align*} Averaging this over all choices of $(a,b)$ gives $\bbH[W] + 2 D[X_{[m]}]$, and the claim follows from \Cref{ruz-indep}. \end{proof}
lemma multidist_ruzsa_IV {m:ℕ} (hm: m ≥ 2) {Ω : Type*} (hΩ : MeasureSpace Ω) (X : Fin m → Ω → G) (h_indep : iIndepFun X) : d[∑ i, X i # ∑ i, X i] ≤ 2 * D[X; fun _ ↦ hΩ] := by sorry /-- If `D[X_[m]]=0`, then for each `i ∈ I` there is a finite subgroup `H_i ≤ G` such that `d[X_i; U_{H_i}] = 0`. -/
pfr/blueprint/src/chapter/torsion.tex:238
pfr/PFR/MoreRuzsaDist.lean:848
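The averaging step at the end of the proof, spelled out: averaging \[ \bbH[W + W'] \leq 3\bbH[W] - \bbH[X_a] - \bbH[X_b] \] over the ordered pairs of distinct indices $a, b$, the term $\bbH[X_a] + \bbH[X_b]$ averages to $\frac{2}{m} \sum_{i=1}^m \bbH[X_i]$, so \[ \bbH[W + W'] \leq 3\bbH[W] - \frac{2}{m} \sum_{i=1}^m \bbH[X_i] = \bbH[W] + 2\, D[X_{[m]}]; \] since $W'$ is an independent copy of $W$, \Cref{ruz-indep} gives $d[W; -W] = \bbH[W + W'] - \bbH[W]$, and the claim follows.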
PFR
mutual_information_le
\begin{proposition}[Bounding mutual information]\label{key}\lean{mutual_information_le}\leanok Suppose that $X_{i,j}$, $1 \leq i,j \leq m$, are jointly independent $G$-valued random variables, such that for each $j = 1,\dots,m$, the random variables $(X_{i,j})_{i = 1}^m$ coincide in distribution with some permutation of $X_{[m]}$. Write \[ {\mathcal I} := \bbI[ \bigl(\sum_{i=1}^m X_{i,j}\bigr)_{j =1}^{m} : \bigl(\sum_{j=1}^m X_{i,j}\bigr)_{i = 1}^m \; \big| \; \sum_{i=1}^m \sum_{j = 1}^m X_{i,j} ]. \] Then \begin{equation}\label{I-ineq} {\mathcal I} \leq 4 m^2 \eta k. \end{equation} \end{proposition} \begin{proof}\uses{cor-multid, multidist-perm, cond-multidist-lower-II, first-useful, klm-3, sumset-lower, ruzsa-triangle, compare-sums, multidist-ruzsa-II} For each $j \in \{1,\dots,m\}$ we call the tuple $(X_{i,j})_{i = 1}^m$ a \emph{column} and for each $i \in \{1,\dots, m\}$ we call the tuple $(X_{i,j})_{j = 1}^m$ a \emph{row}. Hence, by hypothesis, each column is a permutation of $X_{[m]} = (X_i)_{i=1}^m$. From \Cref{cor-multid} we have \begin{equation}\label{441} {\mathcal I} \leq \sum_{j=1}^{m-1} A_j + B,\end{equation} where \[ A_j := D[ (X_{i, j})_{i = 1}^m] - D[ (X_{i, j})_{i = 1}^m \; \big| \; (X_{i,j} + \cdots + X_{i,m})_{i =1}^m ] \] and \[ B := D[ (X_{i,m})_{i=1}^m ] - D[ \bigl(\sum_{j=1}^m X_{i,j}\bigr)_{i=1}^m ]. \] We first consider the $A_j$, for fixed $j \in \{1,\dots, m-1\}$. By \Cref{multidist-perm} and our hypothesis on columns, we have \[ D[ (X_{i, j})_{i = 1}^m ]= D[ (X_i)_{i=1}^m ] = k. \] Let $\sigma = \sigma_j \colon I \to I$ be a permutation such that $X_{i,j} = X_{\sigma(i)}$, and write $X'_i := X_{i,j}$ and $Y_i := X_{i,j} + \cdots + X_{i,m}$. By \Cref{cond-multidist-lower-II}, we have \begin{align} A_j & \leq \eta \sum_{i = 1}^m d[X_{i,j}; X_{i, j}|X_{i, j} + \cdots + X_{i,m}].\label{54a} \end{align} We similarly consider $B$. By \Cref{multidist-perm} applied to the $m$-th column, \[ D[ (X_{i, m})_{i = 1}^m ] = D[X_{[m]}] = k. \] For $1 \leq i \leq m$, denote the sum of row $i$ by \[ V_i := \sum_{j=1}^m X_{i,j}; \] if we apply \Cref{cond-multidist-lower-II} again, now with $X_{\sigma(i)} = X_{i,m}$, $X'_i := V_i$, and with the variable $Y_i$ being trivial, we obtain \begin{equation}\label{55a} B \leq \eta \sum_{i = 1}^m d[X_{i,m}; V_i]. \end{equation} It remains to bound the distances appearing in~\eqref{54a} and~\eqref{55a} further using Ruzsa calculus. For $1 \leq j \leq m-1$ and $1 \leq i \leq m$, by \Cref{first-useful} we have \begin{align*} &d[ X_{i,j}; X_{i,j}| X_{i,j}+\cdots+X_{i,m}] \leq d[X_{i,j}; X_{i,j}] \\ &\quad + \tfrac{1}{2} \bigl(\bbH[X_{i,j}+\cdots+X_{i,m}] - \bbH[X_{i,{j+1}}+\cdots+X_{i,m}]\bigr). \end{align*} For each $i$, summing over $j = 1,\dots, m-1$ gives \begin{align} \nonumber &\sum_{j=1}^{m-1} d[X_{i,j}; X_{i,j}| X_{i,j}+\cdots+X_{i,m}] \\ &\qquad \leq \sum_{j=1}^{m-1} d[X_{i,j}; X_{i,j}] + \frac12 \bigl( \bbH[V_i] - \bbH[X_{i,m}] \bigr). \label{eq:distbnd1} \end{align} On the other hand, by \Cref{klm-3} (since $X_{i,m}$ appears in the sum $V_i$) we have \begin{align} d[X_{i,m}; V_i] &\leq d[X_{i,m}; X_{i,m}] + \frac12 \bigl( \bbH[V_i] - \bbH[X_{i,m}] \bigr). 
\label{eq:distbnd2} \end{align} Combining~\eqref{441},~\eqref{54a} and~\eqref{55a} with~\eqref{eq:distbnd1} and~\eqref{eq:distbnd2} (the latter two summed over $i$), we get \begin{align} \nonumber \frac1{\eta} {\mathcal I} &\leq \sum_{i,j=1}^m d[X_{i,j};X_{i,j}] + \sum_{i=1}^m (\bbH[V_i] - \bbH[X_{i,m}]) \\ &= m \sum_{i=1}^m d[X_i; X_i] + \sum_{i=1}^m \bbH[V_i] - \sum_{i=1}^m \bbH[X_i]. \label{eq:distbnd3} \end{align} By \Cref{compare-sums} (with $f$ taking each $j$ to the index $j'$ such that $X_{i,j}$ is a copy of $X_{j'}$) we obtain the bound \[ \bbH[V_i] \leq \bbH[\sum_{j=1}^m X_j] + \sum_{j=1}^m d[X_{i,j}; X_{i,j}]. \] Finally, summing over $i$ and using $D[X_{[m]}] = k$ gives \begin{align*} \sum_{i=1}^m \bbH[V_i] - \sum_{i=1}^m \bbH[X_i] & \leq \sum_{i,j=1}^m d[X_{i,j}; X_{i,j}] + m k \\ & = m\sum_{i = 1}^m d[X_i; X_i] + mk, \end{align*} where in the second step we used the permutation hypothesis. Combining this with~\eqref{eq:distbnd3} gives the bound $$ {\mathcal I} \leq 2\eta m \biggl( \sum_{i=1}^m d[X_i;X_i] \biggr).$$ The claim \eqref{I-ineq} is now immediate from \Cref{multidist-ruzsa-II}. \end{proof}
lemma mutual_information_le {G Ωₒ : Type u} [MeasureableFinGroup G] [MeasureSpace Ωₒ] (p : multiRefPackage G Ωₒ) (Ω : Type u) [hΩ: MeasureSpace Ω] (X : ∀ i, Ω → G) (h_indep : iIndepFun X) (h_min : multiTauMinimizes p (fun _ ↦ Ω) (fun _ ↦ hΩ) X) (Ω' : Type*) [MeasureSpace Ω'] (X' : Fin p.m × Fin p.m → Ω' → G) (h_indep': iIndepFun X') (hperm : ∀ j, ∃ e : Fin p.m ≃ Fin p.m, IdentDistrib (fun ω ↦ (fun i ↦ X' (i, j) ω)) (fun ω ↦ (fun i ↦ X (e i) ω))) : I[ fun ω ↦ ( fun j ↦ ∑ i, X' (i, j) ω) : fun ω ↦ ( fun i ↦ ∑ j, X' (i, j) ω) | fun ω ↦ ∑ i, ∑ j, X' (i, j) ω ] ≤ 4 * p.m^2 * p.η * D[ X; (fun _ ↦ hΩ)] := sorry
pfr/blueprint/src/chapter/torsion.tex:486
pfr/PFR/BoundingMutual.lean:28
PFR
mutual_information_le_t_12
\begin{proposition}[Mutual information bound]\label{prop:52}\lean{mutual_information_le_t_12, mutual_information_le_t_13, mutual_information_le_t_23}\uses{more-random}\leanok We have \[ \bbI[Z_1 : Z_2\, |\, W],\ \bbI[Z_2 : Z_3\, |\, W],\ \bbI[Z_1 : Z_3\, |\, W] \leq t \] where \begin{equation}\label{t-def} t := 4m^2 \eta k. \end{equation} \end{proposition} \begin{proof}\uses{key, data-process} We analyze these variables by \Cref{key} in several different ways. In the first application, take $X_{i,j}=Y_{i,j}$. Note that each column $(X_{i,j})_{i=1}^m$ is indeed a permutation of $X_1,\dots,X_m$; in fact, the trivial permutation. Note also that for each $i \in \Z/m\Z$, the row sum is \[ \sum_{j=1}^m X_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j} = P_i \] and for each $j \in \Z/m\Z$, the column sum is \[ \sum_{i=1}^m X_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j} = Q_j. \] Finally note that $\sum_{i,j=1}^m X_{i,j} = W$. From \Cref{key} we then have \[ \bbI[ (P_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] with $t$ as in~\eqref{t-def}. Since $Z_1$ is a function of $(P_i)_{i \in \Z/m\Z}$ by~\eqref{pqr-defs}, and similarly $Z_2$ is a function of $(Q_j)_{j \in \Z/m\Z}$, it follows immediately from \Cref{data-process} that \[ \bbI[ Z_1 : Z_2 \,|\, W ] \leq t. \] In the second application of \Cref{key}, we instead consider $X'_{i,j} = Y_{i-j,j}$. Again, for each fixed $j$, the tuple $(X'_{i,j})_{i=1}^m$ is a permutation of $X_1,\dots,X_m$. This time the row sums for $i \in \{1,\dots, m\}$ are \[ \sum_{j=1}^m X'_{i,j} = \sum_{j \in \Z/m\Z} Y_{i-j,j} = R_{-i}. \] Similarly, the column sums for $j \in \{1,\dots, m\}$ are \[ \sum_{i=1}^m X'_{i,j} = \sum_{i \in \Z/m\Z} Y_{i-j,j} = Q_j. \] As before, $\sum_{i,j=1}^m X'_{i,j} = W$. Hence, using~\eqref{pqr-defs} and \Cref{data-process} again, \Cref{key} tells us \[ \bbI[ Z_3 : Z_2 \,|\, W] \leq \bbI[ (R_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t. \] In the third application\footnote{In fact, by permuting the variables $(Y_{i,j})_{i,j \in \Z/m\Z}$, one can see that the random variables $(W, Z_1, Z_2)$ and $(W, Z_1, Z_3)$ have the same distribution, so this is in some sense identical to -- and can be deduced from -- the first application.} of \Cref{key}, take $X''_{i,j} = Y_{i,j-i}$. The column and row sums are respectively \[ \sum_{j=1}^m X''_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j-i} = P_i \] and \[ \sum_{i=1}^m X''_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j-i} = R_{-j}. \] Hence, \Cref{key} and \Cref{data-process} give \[ \bbI[ Z_1 : Z_3 \,|\, W] \leq \bbI[ (P_i)_{i \in \Z/m\Z} : (R_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] which completes the proof. \end{proof}
lemma mutual_information_le_t_12 : I[Z1 : Z2 | W] ≤ 4 * p.m ^ 2 * p.η * k := sorry
pfr/blueprint/src/chapter/torsion.tex:608
pfr/PFR/TorsionEndgame.lean:50
PFR
mutual_information_le_t_13
\begin{proposition}[Mutual information bound]\label{prop:52}\lean{mutual_information_le_t_12, mutual_information_le_t_13, mutual_information_le_t_23}\uses{more-random}\leanok We have \[ \bbI[Z_1 : Z_2\, |\, W],\ \bbI[Z_2 : Z_3\, |\, W],\ \bbI[Z_1 : Z_3\, |\, W] \leq t \] where \begin{equation}\label{t-def} t := 4m^2 \eta k. \end{equation} \end{proposition} \begin{proof}\uses{key, data-process} We analyze these variables by \Cref{key} in several different ways. In the first application, take $X_{i,j}=Y_{i,j}$. Note that each column $(X_{i,j})_{i=1}^m$ is indeed a permutation of $X_1,\dots,X_m$; in fact, the trivial permutation. Note also that for each $i \in \Z/m\Z$, the row sum is \[ \sum_{j=1}^m X_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j} = P_i \] and for each $j \in \Z/m\Z$, the column sum is \[ \sum_{i=1}^m X_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j} = Q_j. \] Finally note that $\sum_{i,j=1}^m X_{i,j} = W$. From \Cref{key} we then have \[ \bbI[ (P_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] with $t$ as in~\eqref{t-def}. Since $Z_1$ is a function of $(P_i)_{i \in \Z/m\Z}$ by~\eqref{pqr-defs}, and similarly $Z_2$ is a function of $(Q_j)_{j \in \Z/m\Z}$, it follows immediately from \Cref{data-process} that \[ \bbI[ Z_1 : Z_2 \,|\, W ] \leq t. \] In the second application of \Cref{key}, we instead consider $X'_{i,j} = Y_{i-j,j}$. Again, for each fixed $j$, the tuple $(X'_{i,j})_{i=1}^m$ is a permutation of $X_1,\dots,X_m$. This time the row sums for $i \in \{1,\dots, m\}$ are \[ \sum_{j=1}^m X'_{i,j} = \sum_{j \in \Z/m\Z} Y_{i-j,j} = R_{-i}. \] Similarly, the column sums for $j \in \{1,\dots, m\}$ are \[ \sum_{i=1}^m X'_{i,j} = \sum_{i \in \Z/m\Z} Y_{i-j,j} = Q_j. \] As before, $\sum_{i,j=1}^m X'_{i,j} = W$. Hence, using~\eqref{pqr-defs} and \Cref{data-process} again, \Cref{key} tells us \[ \bbI[ Z_3 : Z_2 \,|\, W] \leq \bbI[ (R_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t. \] In the third application\footnote{In fact, by permuting the variables $(Y_{i,j})_{i,j \in \Z/m\Z}$, one can see that the random variables $(W, Z_1, Z_2)$ and $(W, Z_1, Z_3)$ have the same distribution, so this is in some sense identical to -- and can be deduced from -- the first application.} of \Cref{key}, take $X''_{i,j} = Y_{i,j-i}$. The column and row sums are respectively \[ \sum_{j=1}^m X''_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j-i} = P_i \] and \[ \sum_{i=1}^m X''_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j-i} = R_{-j}. \] Hence, \Cref{key} and \Cref{data-process} give \[ \bbI[ Z_1 : Z_3 \,|\, W] \leq \bbI[ (P_i)_{i \in \Z/m\Z} : (R_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] which completes the proof. \end{proof}
lemma mutual_information_le_t_13 : I[Z1 : Z3 | W] ≤ 4 * p.m ^ 2 * p.η * k := sorry
pfr/blueprint/src/chapter/torsion.tex:608
pfr/PFR/TorsionEndgame.lean:52
PFR
mutual_information_le_t_23
\begin{proposition}[Mutual information bound]\label{prop:52}\lean{mutual_information_le_t_12, mutual_information_le_t_13, mutual_information_le_t_23}\uses{more-random}\leanok We have \[ \bbI[Z_1 : Z_2\, |\, W],\ \bbI[Z_2 : Z_3\, |\, W],\ \bbI[Z_1 : Z_3\, |\, W] \leq t \] where \begin{equation}\label{t-def} t := 4m^2 \eta k. \end{equation} \end{proposition} \begin{proof}\uses{key, data-process} We analyze these variables by \Cref{key} in several different ways. In the first application, take $X_{i,j}=Y_{i,j}$. Note that each column $(X_{i,j})_{i=1}^m$ is indeed a permutation of $X_1,\dots,X_m$; in fact, the trivial permutation. Note also that for each $i \in \Z/m\Z$, the row sum is \[ \sum_{j=1}^m X_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j} = P_i \] and for each $j \in \Z/m\Z$, the column sum is \[ \sum_{i=1}^m X_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j} = Q_j. \] Finally note that $\sum_{i,j=1}^m X_{i,j} = W$. From \Cref{key} we then have \[ \bbI[ (P_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] with $t$ as in~\eqref{t-def}. Since $Z_1$ is a function of $(P_i)_{i \in \Z/m\Z}$ by~\eqref{pqr-defs}, and similarly $Z_2$ is a function of $(Q_j)_{j \in \Z/m\Z}$, it follows immediately from \Cref{data-process} that \[ \bbI[ Z_1 : Z_2 \,|\, W ] \leq t. \] In the second application of \Cref{key}, we instead consider $X'_{i,j} = Y_{i-j,j}$. Again, for each fixed $j$, the tuple $(X'_{i,j})_{i=1}^m$ is a permutation of $X_1,\dots,X_m$. This time the row sums for $i \in \{1,\dots, m\}$ are \[ \sum_{j=1}^m X'_{i,j} = \sum_{j \in \Z/m\Z} Y_{i-j,j} = R_{-i}. \] Similarly, the column sums for $j \in \{1,\dots, m\}$ are \[ \sum_{i=1}^m X'_{i,j} = \sum_{i \in \Z/m\Z} Y_{i-j,j} = Q_j. \] As before, $\sum_{i,j=1}^m X'_{i,j} = W$. Hence, using~\eqref{pqr-defs} and \Cref{data-process} again, \Cref{key} tells us \[ \bbI[ Z_3 : Z_2 \,|\, W] \leq \bbI[ (R_i)_{i \in \Z/m\Z} : (Q_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t. \] In the third application\footnote{In fact, by permuting the variables $(Y_{i,j})_{i,j \in \Z/m\Z}$, one can see that the random variables $(W, Z_1, Z_2)$ and $(W, Z_1, Z_3)$ have the same distribution, so this is in some sense identical to -- and can be deduced from -- the first application.} of \Cref{key}, take $X''_{i,j} = Y_{i,j-i}$. The column and row sums are respectively \[ \sum_{j=1}^m X''_{i,j} = \sum_{j \in \Z/m\Z} Y_{i,j-i} = P_i \] and \[ \sum_{i=1}^m X''_{i,j} = \sum_{i \in \Z/m\Z} Y_{i,j-i} = R_{-j}. \] Hence, \Cref{key} and \Cref{data-process} give \[ \bbI[ Z_1 : Z_3 \,|\, W] \leq \bbI[ (P_i)_{i \in \Z/m\Z} : (R_j)_{j \in \Z/m\Z } \,\big|\, W ] \leq t, \] which completes the proof. \end{proof}
lemma mutual_information_le_t_23 : I[Z2 : Z3 | W] ≤ 4 * p.m ^ 2 * p.η * k := sorry
pfr/blueprint/src/chapter/torsion.tex:608
pfr/PFR/TorsionEndgame.lean:54
PFR
mutual_of_W_Z_two_le
\begin{lemma}[Mutual information bound]\label{mutual-w-z2}\lean{mutual_of_W_Z_two_le}\leanok We have $\bbI[W : Z_2] \leq 2 (m-1) k$. \end{lemma} \begin{proof}\uses{alternative-mutual, ent-w} From \Cref{alternative-mutual} we have $\bbI[W : Z_2] = \bbH[W] - \bbH[W | Z_2]$, and since $Z_2 = \sum_{j=1}^{m-1} j Q_j$ and $W = \sum_{j=1}^m Q_j$, \[ \bbH[W | Z_2] \geq \bbH[W \,|\, Q_1,\dots,Q_{m-1}] = \bbH[Q_m] = \bbH[S]. \] Hence, by \Cref{ent-w}, \[ \bbI[W : Z_2] \leq \bbH[W] - \bbH[S] \leq 2 (m-1) k, \] as claimed. \end{proof}
/-- We have $\bbI[W : Z_2] \leq 2 (m-1) k$. -/ lemma mutual_of_W_Z_two_le : I[W : Z2] ≤ 2 * (p.m-1) * k := sorry
pfr/blueprint/src/chapter/torsion.tex:720
pfr/PFR/TorsionEndgame.lean:63
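The conditioning step of the proof, expanded (under the joint independence of the $Y_{i,j}$ from the setup, which makes the column sums $Q_1,\dots,Q_m$ independent): $Z_2 = \sum_{j=1}^{m-1} j Q_j$ is a function of $(Q_1,\dots,Q_{m-1})$, so \[ \bbH[W \mid Z_2] \geq \bbH[W \mid Q_1,\dots,Q_{m-1}] = \bbH[Q_m \mid Q_1,\dots,Q_{m-1}] = \bbH[Q_m], \] the middle equality because $W = Q_m + \sum_{j=1}^{m-1} Q_j$ differs from $Q_m$ by a quantity determined by the conditioning variables.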
PFR
phiMinimizes
\begin{definition}\label{phi-min-def}\lean{phiMinimizes}\leanok Given $G$-valued random variables $X,Y$, define $$ \phi[X;Y] := d[X;Y] + \eta(\rho(X) + \rho(Y))$$ and define a \emph{$\phi$-minimizer} to be a pair of random variables $X,Y$ which minimizes $\phi[X;Y]$. \end{definition}
def phiMinimizes {Ω : Type*} [MeasurableSpace Ω] (X Y : Ω → G) (η : ℝ) (A : Finset G) (μ : Measure Ω) : Prop := ∀ (Ω' : Type uG) (_ : MeasureSpace Ω') (X' Y' : Ω' → G), IsProbabilityMeasure (ℙ : Measure Ω') → Measurable X' → Measurable Y' → phi X Y η A μ ≤ phi X' Y' η A ℙ
pfr/blueprint/src/chapter/further_improvement.tex:211
pfr/PFR/RhoFunctional.lean:1131
PFR
phi_min_exists
\begin{lemma}[$\phi$-minimizers exist]\label{phi-min-exist}\lean{phi_min_exists}\leanok There exists a $\phi$-minimizer. \end{lemma} \begin{proof}\leanok\uses{rho-cts} Clear from compactness. \end{proof}
/-- There exists a $\phi$-minimizer. -/ lemma phi_min_exists (hA : A.Nonempty) : ∃ (μ : Measure (G × G)), IsProbabilityMeasure μ ∧ phiMinimizes Prod.fst Prod.snd η A μ := by let _i : TopologicalSpace G := (⊥ : TopologicalSpace G) have : DiscreteTopology G := ⟨rfl⟩ let iG : Inhabited G := ⟨0⟩ have T : Continuous (fun (μ : ProbabilityMeasure (G × G)) ↦ phi Prod.fst Prod.snd η A μ) := by apply continuous_iff_continuousAt.2 (fun μ ↦ ?_) apply Tendsto.add · apply tendsto_rdist_probabilityMeasure continuous_fst continuous_snd tendsto_id apply Tendsto.const_mul apply Tendsto.add · apply tendsto_rho_probabilityMeasure continuous_fst hA tendsto_id · apply tendsto_rho_probabilityMeasure continuous_snd hA tendsto_id obtain ⟨μ, _, hμ⟩ := @IsCompact.exists_isMinOn ℝ (ProbabilityMeasure (G × G)) _ _ _ _ Set.univ isCompact_univ ⟨default, trivial⟩ _ T.continuousOn refine ⟨μ, by infer_instance, ?_⟩ intro Ω' mΩ' X' Y' hP hX' hY' let ν : Measure (G × G) := Measure.map (⟨X', Y'⟩) ℙ have : IsProbabilityMeasure ν := isProbabilityMeasure_map (by fun_prop) let ν' : ProbabilityMeasure (G × G) := ⟨ν, this⟩ have : phi Prod.fst Prod.snd η A ↑μ ≤ phi Prod.fst Prod.snd η A ↑ν' := hμ (mem_univ _) apply this.trans_eq have h₁ : IdentDistrib Prod.fst X' (ν' : Measure (G × G)) ℙ := by refine ⟨measurable_fst.aemeasurable, hX'.aemeasurable, ?_⟩ simp only [ProbabilityMeasure.coe_mk, ν', ν] rw [Measure.map_map measurable_fst (by fun_prop)] rfl have h₂ : IdentDistrib Prod.snd Y' (ν' : Measure (G × G)) ℙ := by refine ⟨measurable_snd.aemeasurable, hY'.aemeasurable, ?_⟩ simp only [ProbabilityMeasure.coe_mk, ν', ν] rw [Measure.map_map measurable_snd (by fun_prop)] rfl simp [phi, h₁.rdist_eq h₂, rho_eq_of_identDistrib h₁, rho_eq_of_identDistrib h₂] -- Let $(X_1, X_2)$ be a $\phi$-minimizer, and $\tilde X_1, \tilde X_2$ be independent copies -- of $X_1,X_2$ respectively. variable {X₁ X₂ X₁' X₂' : Ω → G} (h_min : phiMinimizes X₁ X₂ η A ℙ) (h₁ : IdentDistrib X₁ X₁') (h₂ : IdentDistrib X₂ X₂') (h_indep : iIndepFun ![X₁, X₂, X₁', X₂']) (hX₁ : Measurable X₁) (hX₂ : Measurable X₂) (hX₁' : Measurable X₁') (hX₂' : Measurable X₂') local notation3 "I₁" => I[X₁ + X₂ : X₁' + X₂ | X₁ + X₂ + X₁' + X₂'] local notation3 "I₂" => I[X₁ + X₂ : X₁ + X₁' | X₁ + X₂ + X₁' + X₂'] /-- `k := d[X₁ # X₂]`, the Ruzsa distance `rdist` between X₁ and X₂. -/ local notation3 "k" => d[X₁ # X₂]
pfr/blueprint/src/chapter/further_improvement.tex:216
pfr/PFR/RhoFunctional.lean:1156
PFR
rdist_add_rdist_add_condMutual_eq
\begin{lemma}[Fibring identity for first estimate]\label{first-fibre} \lean{rdist_add_rdist_add_condMutual_eq}\leanok We have \begin{align*} & d[X_1+\tilde X_2;X_2+\tilde X_1] + d[X_1|X_1+\tilde X_2; X_2|X_2+\tilde X_1] \\ &\quad + \bbI[X_1+ X_2 : \tilde X_1 + X_2 \,|\, X_1 + X_2 + \tilde X_1 + \tilde X_2] = 2k. \end{align*} \end{lemma} \begin{proof}\uses{cor-fibre} \leanok Immediate from \Cref{cor-fibre}. \end{proof}
lemma rdist_add_rdist_add_condMutual_eq [Module (ZMod 2) G] : d[X₁ + X₂' # X₂ + X₁'] + d[X₁ | X₁ + X₂' # X₂ | X₂ + X₁'] + I[X₁ + X₂ : X₁' + X₂ | X₁ + X₂ + X₁' + X₂'] = 2 * k := by have h0 : ![X₁, X₂, X₂', X₁'] 0 = X₁ := rfl have h1 : ![X₁, X₂, X₂', X₁'] 1 = X₂ := rfl have h2 : ![X₁, X₂, X₂', X₁'] 2 = X₂' := rfl have h3 : ![X₁, X₂, X₂', X₁'] 3 = X₁' := rfl have h := sum_of_rdist_eq_char_2 ![X₁, X₂, X₂', X₁'] h_indep (fun i => by fin_cases i <;> assumption) rw [h0, h1, h2, h3] at h have heq : d[X₂' # X₁'] = k := by rw [rdist_symm] apply h₁.symm.rdist_eq h₂.symm rw [heq] at h convert h.symm using 1 · congr 2 <;> abel · ring include h_min hX₁ hX₂ hX₁' hX₂' in /-- The distance $d[X_1+\tilde X_2; X_2+\tilde X_1]$ is at least $$ k - \eta (d[X^0_1; X_1+\tilde X_2] - d[X^0_1; X_1]) - \eta (d[X^0_2; X_2+\tilde X_1] - d[X^0_2; X_2]).$$ -/
pfr/blueprint/src/chapter/entropy_pfr.tex:79
pfr/PFR/FirstEstimate.lean:53
PFR
rdist_add_rdist_eq
\begin{lemma}\label{I1-I2-diff}\lean{rdist_add_rdist_eq}\leanok $d[X_1;X_1]+d[X_2;X_2]= 2d[X_1;X_2]+(I_2-I_1)$. \end{lemma} \begin{proof}\leanok \uses{first-fibre,cor-fibre} Compare \Cref{first-fibre} with the identity obtained from applying \Cref{cor-fibre} on $(X_1,\tilde X_1, X_2, \tilde X_2)$. \end{proof}
/-- $d[X_1;X_1]+d[X_2;X_2]= 2d[X_1;X_2]+(I_2-I_1)$. -/ lemma rdist_add_rdist_eq : d[ X₁ # X₁ ] + d[ X₂ # X₂ ] = 2 * k + (I₂ - I₁) := by have : d[X₁ + X₂' # X₂ + X₁'] + d[X₁ | X₁ + X₂' # X₂ | X₂ + X₁'] + I₁ = 2 * k := rdist_add_rdist_add_condMutual_eq _ _ _ _ hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep.reindex_four_abdc have : d[X₁ # X₁] + d[X₂ # X₂] = d[X₁ + X₂' # X₂ + X₁'] + d[X₁ | X₁ + X₂' # X₂ | X₂ + X₁'] + I₂ := I_two_aux h₁ h₂ h_indep hX₁ hX₂ hX₁' hX₂' linarith include hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep in
pfr/blueprint/src/chapter/further_improvement.tex:236
pfr/PFR/RhoFunctional.lean:1380
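In prose, the two identities combined by `linarith` above: \Cref{first-fibre} gives \[ d[X_1+\tilde X_2; X_2+\tilde X_1] + d[X_1|X_1+\tilde X_2; X_2|X_2+\tilde X_1] + I_1 = 2k, \] while the second application of the fibring identity (the `I_two_aux` step in the Lean proof) gives \[ d[X_1; X_1] + d[X_2; X_2] = d[X_1+\tilde X_2; X_2+\tilde X_1] + d[X_1|X_1+\tilde X_2; X_2|X_2+\tilde X_1] + I_2; \] subtracting the first identity from the second cancels the common terms and leaves $d[X_1;X_1] + d[X_2;X_2] = 2k + (I_2 - I_1)$.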
PFR
rdist_def
\begin{definition}[Ruzsa distance]\label{ruz-dist-def} \uses{entropy-def, independent-exist} \lean{rdist_def}\leanok Let $X,Y$ be $G$-valued random variables (not necessarily on the same sample space). The \emph{Ruzsa distance} $d[X ;Y]$ between $X$ and $Y$ is defined to be $$ d[X ;Y] := \bbH[X' - Y'] - \bbH[X']/2 - \bbH[Y']/2$$ where $X',Y'$ are (the canonical) independent copies of $X,Y$ from \Cref{independent-exist}. \end{definition}
/-- Explicit formula for the Ruzsa distance. -/ lemma rdist_def (X : Ω → G) (Y : Ω' → G) (μ : Measure Ω) (μ' : Measure Ω') : d[X ; μ # Y ; μ'] = H[fun x ↦ x.1 - x.2 ; (μ.map X).prod (μ'.map Y)] - H[X ; μ]/2 - H[Y ; μ']/2 := rfl
pfr/blueprint/src/chapter/distance.tex:80
pfr/PFR/ForMathlib/Entropy/RuzsaDist.lean:72
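A worked instance of the definition: if $X$ and $Y$ are both uniform on a finite subgroup $H \leq G$, then the independent copies $X', Y'$ are uniform on $H$, and so is $X' - Y'$ (each element of $H$ arises from exactly $|H|$ of the $|H|^2$ equally likely pairs), hence \[ d[X; Y] = \bbH[X' - Y'] - \tfrac{1}{2}\bbH[X'] - \tfrac{1}{2}\bbH[Y'] = \log |H| - \tfrac{1}{2}\log |H| - \tfrac{1}{2}\log |H| = 0. \]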
PFR
rdist_le_sum_fibre
\begin{proposition}[General fibring identity]\label{fibring-ident} \lean{rdist_of_indep_eq_sum_fibre, rdist_le_sum_fibre}\leanok Let $\pi : H \to H'$ be a homomorphism of additive groups, and let $Z_1,Z_2$ be $H$-valued random variables. Then we have \[ d[Z_1; Z_2] \geq d[\pi(Z_1);\pi(Z_2)] + d[Z_1|\pi(Z_1); Z_2 |\pi(Z_2)]. \] Moreover, if $Z_1,Z_2$ are taken to be independent, then the difference between the two sides is $$I( Z_1 - Z_2 : (\pi(Z_1), \pi(Z_2)) | \pi(Z_1 - Z_2) ).$$ \end{proposition} \begin{proof}\uses{ruz-copy, independent-exist, submodularity,conditional-mutual-alt,chain-rule,relabeled-entropy, cond-dist-alt}\leanok Let $Z_1,Z_2$ be independent throughout (this is possible by \Cref{ruz-copy} and \Cref{independent-exist}). By \Cref{cond-dist-alt}, we have \begin{align*} & d[Z_1 |\pi(Z_1); Z_2 |\pi(Z_2)] \\ & = \bbH[Z_1 - Z_2 | \pi(Z_1),\pi(Z_2)] - \tfrac{1}{2} \bbH[Z_1 | \pi(Z_1)] - \tfrac{1}{2} \bbH[Z_2 | \pi(Z_2)] \\ & \leq \bbH[Z_1 - Z_2 | \pi(Z_1-Z_2)] - \tfrac{1}{2} \bbH[Z_1 | \pi(Z_1)] - \tfrac{1}{2}\bbH[Z_2 | \pi(Z_2)] \\ & = d[Z_1;Z_2] - d[\pi(Z_1);\pi(Z_2)]. \end{align*} In the middle step, we used \Cref{submodularity}, and in the last step we used the fact that \[\bbH[Z_1 - Z_2 | \pi(Z_1-Z_2)] = \bbH[Z_1 - Z_2] - \bbH[\pi(Z_1-Z_2)]\] (thanks to \Cref{chain-rule} and \Cref{relabeled-entropy}) and that \[\bbH[Z_i| \pi(Z_i)] = \bbH[Z_i] - \bbH[\pi(Z_i)]\] (since $Z_i$ determines $\pi(Z_i)$). This gives the claimed inequality. The difference between the two sides is precisely \[\bbH[Z_1 - Z_2 | \pi(Z_1 - Z_2)] - \bbH[Z_1 - Z_2 | \pi(Z_1),\pi(Z_2)].\] To rewrite this in terms of (conditional) mutual information, we use the identity \[\bbH[A|B] - \bbH[A | B,C] = \bbI[A : C | B],\] (which follows from \Cref{conditional-mutual-alt}) taking $A := Z_1 - Z_2$, $B := \pi(Z_1 - Z_2)$ and $C := (\pi(Z_1),\pi(Z_{2}))$, and noting that in this case $\bbH[A | B,C] = \bbH[A | C]$ since $C$ uniquely determines $B$ (this may require another helper lemma about entropy). This completes the proof. \end{proof}
lemma rdist_le_sum_fibre {Z_1 : Ω → H} {Z_2 : Ω' → H} (h1 : Measurable Z_1) (h2 : Measurable Z_2) [FiniteRange Z_1] [FiniteRange Z_2] : d[π ∘ Z_1; μ # π ∘ Z_2; μ'] + d[Z_1|π∘Z_1; μ # Z_2|π∘Z_2; μ'] ≤ d[Z_1; μ # Z_2; μ']:= by obtain ⟨ν, W_1, W_2, hν, m1, m2, hi, hi1, hi2, _, _⟩ := ProbabilityTheory.independent_copies_finiteRange h1 h2 μ μ' have hπ : Measurable π := .of_discrete have hφ : Measurable (fun x ↦ (x, π x)) := .of_discrete have hπ1 : IdentDistrib (⟨Z_1, π ∘ Z_1⟩) (⟨W_1, π ∘ W_1⟩) μ ν := hi1.symm.comp hφ have hπ2 : IdentDistrib (⟨Z_2, π ∘ Z_2⟩) (⟨W_2, π ∘ W_2⟩) μ' ν := hi2.symm.comp hφ rw [← hi1.rdist_eq hi2, ← (hi1.comp hπ).rdist_eq (hi2.comp hπ), rdist_of_indep_eq_sum_fibre π hi m1 m2, condRuzsaDist_of_copy h1 (hπ.comp h1) h2 (hπ.comp h2) m1 (hπ.comp m1) m2 (hπ.comp m2) hπ1 hπ2] exact le_add_of_nonneg_right (condMutualInfo_nonneg (by fun_prop) (by fun_prop))
pfr/blueprint/src/chapter/fibring.tex:3
pfr/PFR/Fibring.lean:62
PFR
rdist_of_hom_le
\begin{corollary}\label{fibring-ineq} \lean{rdist_of_hom_le} \leanok If $\pi:G\to H$ is a homomorphism of additive groups and $X,Y$ are $G$-valued random variables then \[d[X;Y]\geq d[\pi(X);\pi(Y)].\] \end{corollary} \begin{proof} \uses{fibring-ident, ruzsa-nonneg}\leanok By \Cref{fibring-ident} and the nonnegativity of conditional Ruzsa distance (from \Cref{ruzsa-nonneg}) we have \[d[X;Y]\geq d[\pi(X);\pi(Y)]+d[X\mid \pi(X);Y\mid \pi(Y)].\] The inequality follows from $d[X\mid \pi(X);Y\mid \pi(Y)]\geq 0$ (\Cref{ruzsa-nonneg}). \end{proof}
/-- \[d[X;Y]\geq d[\pi(X);\pi(Y)].\] -/ lemma rdist_of_hom_le {Z_1 : Ω → H} {Z_2 : Ω' → H} (h1 : Measurable Z_1) (h2 : Measurable Z_2) [FiniteRange Z_1] [FiniteRange Z_2] : d[π ∘ Z_1; μ # π ∘ Z_2; μ'] ≤ d[Z_1; μ # Z_2; μ'] := by apply le_trans _ (rdist_le_sum_fibre π h1 h2 (μ := μ) (μ' := μ')) rw [le_add_iff_nonneg_right] exact condRuzsaDist_nonneg h1 (by fun_prop) h2 (by fun_prop) end GeneralFibring variable {G : Type*} [AddCommGroup G] [Fintype G] [hG : MeasurableSpace G] [MeasurableSingletonClass G] variable {Ω : Type*} [mΩ : MeasurableSpace Ω] {μ : Measure Ω} [IsProbabilityMeasure μ]
pfr/blueprint/src/chapter/fibring.tex:37
pfr/PFR/Fibring.lean:75
PFR
rdist_of_indep_eq_sum_fibre
\begin{proposition}[General fibring identity]\label{fibring-ident} \lean{rdist_of_indep_eq_sum_fibre, rdist_le_sum_fibre}\leanok Let $\pi : H \to H'$ be a homomorphism of additive groups, and let $Z_1,Z_2$ be $H$-valued random variables. Then we have \[ d[Z_1; Z_2] \geq d[\pi(Z_1);\pi(Z_2)] + d[Z_1|\pi(Z_1); Z_2 |\pi(Z_2)]. \] Moreover, if $Z_1,Z_2$ are taken to be independent, then the difference between the two sides is $$I( Z_1 - Z_2 : (\pi(Z_1), \pi(Z_2)) | \pi(Z_1 - Z_2) ).$$ \end{proposition} \begin{proof}\uses{ruz-copy, independent-exist, submodularity,conditional-mutual-alt,chain-rule,relabeled-entropy, cond-dist-alt}\leanok Let $Z_1,Z_2$ be independent throughout (this is possible by \Cref{ruz-copy} and \Cref{independent-exist}). By \Cref{cond-dist-alt}, we have \begin{align*} & d[Z_1 |\pi(Z_1); Z_2 |\pi(Z_2)] \\ & = \bbH[Z_1 - Z_2 | \pi(Z_1),\pi(Z_2)] - \tfrac{1}{2} \bbH[Z_1 | \pi(Z_1)] - \tfrac{1}{2} \bbH[Z_2 | \pi(Z_2)] \\ & \leq \bbH[Z_1 - Z_2 | \pi(Z_1-Z_2)] - \tfrac{1}{2} \bbH[Z_1 | \pi(Z_1)] - \tfrac{1}{2}\bbH[Z_2 | \pi(Z_2)] \\ & = d[Z_1;Z_2] - d[\pi(Z_1);\pi(Z_2)]. \end{align*} In the middle step, we used \Cref{submodularity}, and in the last step we used the fact that \[\bbH[Z_1 - Z_2 | \pi(Z_1-Z_2)] = \bbH[Z_1 - Z_2] - \bbH[\pi(Z_1-Z_2)]\] (thanks to \Cref{chain-rule} and \Cref{relabeled-entropy}) and that \[\bbH[Z_i| \pi(Z_i)] = \bbH[Z_i] - \bbH[\pi(Z_i)]\] (since $Z_i$ determines $\pi(Z_i)$). This gives the claimed inequality. The difference between the two sides is precisely \[\bbH[Z_1 - Z_2 | \pi(Z_1 - Z_2)] - \bbH[Z_1 - Z_2 | \pi(Z_1),\pi(Z_2)].\] To rewrite this in terms of (conditional) mutual information, we use the identity \[\bbH[A|B] - \bbH[A | B,C] = \bbI[A : C | B],\] (which follows from \Cref{conditional-mutual-alt}) taking $A := Z_1 - Z_2$, $B := \pi(Z_1 - Z_2)$ and $C := (\pi(Z_1),\pi(Z_{2}))$, and noting that in this case $\bbH[A | B,C] = \bbH[A | C]$ since $C$ uniquely determines $B$ (this may require another helper lemma about entropy). This completes the proof. \end{proof}
lemma rdist_of_indep_eq_sum_fibre {Z_1 Z_2 : Ω → H} (h : IndepFun Z_1 Z_2 μ) (h1 : Measurable Z_1) (h2 : Measurable Z_2) [FiniteRange Z_1] [FiniteRange Z_2]: d[Z_1; μ # Z_2; μ] = d[π ∘ Z_1; μ # π ∘ Z_2; μ] + d[Z_1|π∘Z_1; μ # Z_2|π∘Z_2; μ] + I[Z_1-Z_2 : ⟨π∘Z_1, π∘Z_2⟩ | π∘(Z_1 - Z_2); μ] := by have hπ : Measurable π := .of_discrete have step1 : d[Z_1; μ # Z_2; μ] = d[π ∘ Z_1; μ # π ∘ Z_2; μ] + H[(Z_1 - Z_2)| π ∘ (Z_1 - Z_2); μ] - H[Z_1 | π ∘ Z_1; μ] / 2 - H[Z_2 | π ∘ Z_2; μ] / 2 := by have hsub : H[(Z_1 - Z_2)| π ∘ (Z_1 - Z_2); μ] = H[(Z_1 - Z_2); μ] - H[π ∘ (Z_1 - Z_2); μ] := condEntropy_comp_self (by fun_prop) hπ rw [h.rdist_eq h1 h2, (h.comp hπ hπ).rdist_eq (hπ.comp h1) (hπ.comp h2), condEntropy_comp_self h1 hπ, condEntropy_comp_self h2 hπ, hsub, map_comp_sub π] ring_nf have m0 : Measurable (fun x ↦ (x, π x)) := .of_discrete have h' : IndepFun (⟨Z_1, π ∘ Z_1⟩) (⟨Z_2, π ∘ Z_2⟩) μ := h.comp m0 m0 have m1 : Measurable (Z_1 - Z_2) := h1.sub h2 have m2 : Measurable (⟨↑π ∘ Z_1, ↑π ∘ Z_2⟩) := (hπ.comp h1).prodMk (hπ.comp h2) have m3 : Measurable (↑π ∘ (Z_1 - Z_2)) := hπ.comp m1 have entroplem : H[Z_1 - Z_2|⟨⟨↑π ∘ Z_1, ↑π ∘ Z_2⟩, ↑π ∘ (Z_1 - Z_2)⟩; μ] = H[Z_1 - Z_2|⟨↑π ∘ Z_1, ↑π ∘ Z_2⟩; μ] := by rw [map_comp_sub π] let f : H' × H' → (H' × H') × H' := fun (x,y) ↦ ((x,y), x - y) have hf : Injective f := fun _ _ h ↦ (Prod.ext_iff.1 h).1 have mf : Measurable f := measurable_id.prodMk measurable_sub refine condEntropy_of_injective' μ m1 m2 f hf (mf.comp m2) rw [step1, condMutualInfo_eq' m1 m2 m3, entroplem, condRuzsaDist_of_indep h1 (hπ.comp h1) h2 (hπ.comp h2) μ h'] ring_nf
pfr/blueprint/src/chapter/fibring.tex:3
pfr/PFR/Fibring.lean:35
PFR
rdist_of_neg_le
\begin{lemma}[Flipping a sign]\label{sign-flip}\lean{rdist_of_neg_le}\leanok If $X,Y$ are $G$-valued, then $$ d[X ; -Y] \leq 3 d[X;Y].$$ \end{lemma} \begin{proof}\uses{ruz-copy, independent-exist, cond-indep-exist, alt-submodularity, ruz-indep, neg-ent, relabeled-entropy, add-entropy, subadditive, data-process-single}\leanok Without loss of generality (using \Cref{ruz-copy} and \Cref{independent-exist}) we may take $X,Y$ to be independent. Let $(X_1,Y_1)$, $(X_2,Y_2)$ be copies of $(X,Y)$ that are conditionally independent over $X_1-Y_1=X_2-Y_2$ (this exists thanks to \Cref{cond-indep-exist}). By \Cref{independent-exist}, we can also find another copy $(X_3,Y_3)$ of $(X,Y)$ that is independent of $X_1,Y_1,X_2,Y_2$. From \Cref{alt-submodularity}, one has $$ \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3, Y_3, X_3+Y_3] + \bbH[X_3+Y_3] \leq \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3+Y_3] + \bbH[X_3, Y_3, X_3+Y_3].$$ From \Cref{ruz-indep}, \Cref{neg-ent}, \Cref{ruz-copy} we have $$ \bbH[X_3+Y_3] = \frac{1}{2} \bbH[X_3] + \frac{1}{2} \bbH[-Y_3] + d[X_3;-Y_3] = \frac{1}{2} \bbH[X] + \frac{1}{2} \bbH[Y] + d[X;-Y].$$ Since $X_3+Y_3$ is a function of $X_3,Y_3$, we see from \Cref{relabeled-entropy} and \Cref{add-entropy} that $$ \bbH[X_3,Y_3,X_3+Y_3] = \bbH[X_3,Y_3] = \bbH[X,Y] = \bbH[X]+\bbH[Y].$$ Because $X_1-Y_1=X_2-Y_2$, we have $$ X_3+Y_3 = (X_3-Y_2) - (X_1-Y_3) + (X_2+Y_1)$$ and thus by \Cref{relabeled-entropy} $$ \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3+Y_3] = \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1]$$ and hence by \Cref{subadditive} $$ \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3+Y_3] \leq \bbH[X_3-Y_2] + \bbH[X_1-Y_3] + \bbH[X_2] + \bbH[Y_1].$$ Since $X_3,Y_2$ are independent, we see from \Cref{ruz-indep}, \Cref{ruz-copy} that $$\bbH[X_3-Y_2] = \frac{1}{2} \bbH[X] + \frac{1}{2} \bbH[Y] + d[X; Y].$$ Similarly $$ \bbH[X_1-Y_3] = \frac{1}{2} \bbH[X] + \frac{1}{2} \bbH[Y] + d[X; Y].$$ We conclude that $$ \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3+Y_3] \leq 2\bbH[X] + 2\bbH[Y] + 2d[X; Y].$$ Finally, from \Cref{data-process-single} we have $$ \bbH[X_1,Y_1,X_2,Y_2,X_3,Y_3] \leq \bbH[X_3-Y_2, X_1-Y_3, X_2, Y_1, X_3, Y_3, X_3+Y_3].$$ From \Cref{add-entropy} followed by \Cref{cond-trial-ent}, we have $$\bbH[X_1,Y_1,X_2,Y_2,X_3,Y_3] = \bbH[X_1,Y_1,X_1-Y_1] + \bbH[X_2,Y_2,X_2-Y_2] - \bbH[X_1-Y_1] + \bbH[X_3,Y_3]$$ and thus by \Cref{ruz-indep}, \Cref{ruz-copy}, \Cref{relabeled-entropy}, \Cref{add-entropy} $$\bbH[X_1,Y_1,X_2,Y_2,X_3,Y_3] = \bbH[X] + \bbH[Y] + \bbH[X] + \bbH[Y] -\left(\frac{1}{2}\bbH[X] + \frac{1}{2}\bbH[Y] + d[X; Y]\right) + \bbH[X] + \bbH[Y].$$ Applying all of these estimates, the claim now follows from linear arithmetic. \end{proof}
/-- If `X, Y` are `G`-valued, then `d[X;-Y] ≤ 3 d[X;Y]`. -/ lemma rdist_of_neg_le [IsProbabilityMeasure μ] [IsProbabilityMeasure μ'] (hX : Measurable X) (hY : Measurable Y) [Fintype G] : d[X ; μ # -Y ; μ'] ≤ 3 * d[X ; μ # Y ; μ'] := by obtain ⟨ν, X', Y', hν, mX', mY', h_indep', hXX', hYY'⟩ := independent_copies hX hY μ μ' rw [← IdentDistrib.rdist_eq hXX' hYY', ← IdentDistrib.rdist_eq hXX' (IdentDistrib.neg hYY')] obtain ⟨Ω₀, mΩ₀, XY'₁, XY'₂, Z', ν'₀, hν'₀, hXY'₁, hXY'₂, hZ', h_condIndep, h_id1sub, h_id2sub⟩ := condIndep_copies (⟨X', Y'⟩) (X' - Y') (mX'.prodMk mY') (by fun_prop) ν let X₁' := fun ω ↦ (XY'₁ ω).fst let Y'₁ := fun ω ↦ (XY'₁ ω).snd let X₂' := fun ω ↦ (XY'₂ ω).fst let Y'₂ := fun ω ↦ (XY'₂ ω).snd have mX₁' : Measurable X₁' := by fun_prop have mY'₁ : Measurable Y'₁ := by fun_prop have Z'eq1 : Z' =ᵐ[ν'₀] X₁' - Y'₁ := (IdentDistrib.ae_snd h_id1sub.symm (MeasurableSet.of_discrete (s := {x | x.2 = x.1.1 - x.1.2})) (Eventually.of_forall fun ω ↦ rfl) :) obtain ⟨ν₀, XY₁XY₂Z, XY₃, hν₀, hXY₁XY₂Z, hXY₃, h_indep, h_idXY₁XY₂Z, h_idXY₃⟩ := independent_copies (hXY'₁.prodMk hXY'₂ |>.prodMk hZ') (mX'.prodMk mY') ν'₀ ν let X₁ := fun ω ↦ (XY₁XY₂Z ω).fst.fst.fst let Y₁ := fun ω ↦ (XY₁XY₂Z ω).fst.fst.snd let X₂ := fun ω ↦ (XY₁XY₂Z ω).fst.snd.fst let Y₂ := fun ω ↦ (XY₁XY₂Z ω).fst.snd.snd let Z := fun ω ↦ (XY₁XY₂Z ω).snd let X₃ := fun ω ↦ (XY₃ ω).fst let Y₃ := fun ω ↦ (XY₃ ω).snd have mX₁ : Measurable X₁ := by fun_prop have mY₁ : Measurable Y₁ := by fun_prop have mX₂ : Measurable X₂ := by fun_prop have mY₂ : Measurable Y₂ := by fun_prop have mX₃ : Measurable X₃ := by fun_prop have mY₃ : Measurable Y₃ := by fun_prop have mZ : Measurable Z := by fun_prop have idXY₁Z : IdentDistrib (⟨⟨X₁, Y₁⟩, Z⟩) (⟨⟨X', Y'⟩, X' - Y'⟩) ν₀ ν := h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ (x.1.1, x.2))) |>.trans h_id1sub have idXY₂Z : IdentDistrib (⟨⟨X₂, Y₂⟩, Z⟩) (⟨⟨X', Y'⟩, X' - Y'⟩) ν₀ ν := h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ (x.1.2, x.2))) |>.trans h_id2sub have idXY₁ : IdentDistrib (⟨X₁, Y₁⟩) (⟨X', Y'⟩) ν₀ ν := by convert h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ x.1.1)) |>.trans ?_ exact h_id1sub.comp (.of_discrete (f := fun ((x, y), _) ↦ (x, y))) have idXY₂ : IdentDistrib (⟨X₂, Y₂⟩) (⟨X', Y'⟩) ν₀ ν := by convert h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ x.1.2)) |>.trans ?_ exact h_id2sub.comp (.of_discrete (f := fun ((x, y), _) ↦ (x, y))) have idXY₃ : IdentDistrib (⟨X₃, Y₃⟩) (⟨X', Y'⟩) ν₀ ν := h_idXY₃ have idX₁ : IdentDistrib X₁ X' ν₀ ν := idXY₁.comp (by fun_prop) have idY₁ : IdentDistrib Y₁ Y' ν₀ ν := idXY₁.comp (by fun_prop) have idX₂ : IdentDistrib X₂ X' ν₀ ν := idXY₂.comp (by fun_prop) have idY₂ : IdentDistrib Y₂ Y' ν₀ ν := idXY₂.comp (by fun_prop) have idX₃ : IdentDistrib X₃ X' ν₀ ν := idXY₃.comp (by fun_prop) have idY₃ : IdentDistrib Y₃ Y' ν₀ ν := idXY₃.comp (by fun_prop) have idXY₁₂XY'₁₂ : IdentDistrib (⟨⟨X₁, Y₁⟩, ⟨X₂, Y₂⟩⟩) (⟨⟨X₁', Y'₁⟩, ⟨X₂', Y'₂⟩⟩) ν₀ ν'₀ := h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ x.1)) have idXY₁ZXY'₁Z' : IdentDistrib (⟨⟨X₁, Y₁⟩, Z⟩) (⟨⟨X₁', Y'₁⟩, Z'⟩) ν₀ ν'₀ := h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ (x.1.1, x.2))) have idXY₂ZXY'₂Z' : IdentDistrib (⟨⟨X₂, Y₂⟩, Z⟩) (⟨⟨X₂', Y'₂⟩, Z'⟩) ν₀ ν'₀ := h_idXY₁XY₂Z.comp (.of_discrete (f := fun x ↦ (x.1.2, x.2))) have idZZ' : IdentDistrib Z Z' ν₀ ν'₀ := h_idXY₁XY₂Z.comp .of_discrete have Zeq1 : Z =ᵐ[ν₀] X₁ - Y₁ := (IdentDistrib.ae_snd idXY₁Z.symm (MeasurableSet.of_discrete (s := {x | x.2 = x.1.1 - x.1.2})) (Eventually.of_forall fun ω ↦ rfl) :) have Zeq2 : Z =ᵐ[ν₀] X₂ - Y₂ := (IdentDistrib.ae_snd idXY₂Z.symm 
(MeasurableSet.of_discrete (s := {x | x.2 = x.1.1 - x.1.2})) (Eventually.of_forall fun ω ↦ rfl) :) have iX₁Y₃ : IndepFun X₁ Y₃ ν₀ := by convert h_indep.comp (.of_discrete (f := fun x ↦ x.1.1.1)) (.of_discrete (f := fun x ↦ x.2)) have iX₃Y₂ : IndepFun X₃ Y₂ ν₀ := by convert h_indep.symm.comp (.of_discrete (f := fun x ↦ x.1)) (.of_discrete (f := fun x ↦ x.1.2.2)) have iX₁Y₁ : IndepFun X₁ Y₁ ν₀ := indepFun_of_identDistrib_pair h_indep' idXY₁.symm have iX₂Y₂ : IndepFun X₂ Y₂ ν₀ := indepFun_of_identDistrib_pair h_indep' idXY₂.symm have iX₃Y₃ : IndepFun X₃ Y₃ ν₀ := indepFun_of_identDistrib_pair h_indep' idXY₃.symm have iX₃negY₃ : IndepFun X₃ (-Y₃) ν₀ := iX₃Y₃.comp measurable_id measurable_neg have i112233 : IndepFun (⟨⟨X₁, Y₁⟩, ⟨X₂, Y₂⟩⟩) (⟨X₃, Y₃⟩) ν₀ := h_indep.comp (.of_discrete (f := fun (xy, _) ↦ xy)) measurable_id have hX1 : H[X' ; ν] = H[X₁ ; ν₀] := idX₁.entropy_eq.symm have hX2 : H[X' ; ν] = H[X₂ ; ν₀] := idX₂.entropy_eq.symm have hX3 : H[X' ; ν] = H[X₃ ; ν₀] := idX₃.entropy_eq.symm have hY1 : H[Y' ; ν] = H[Y₁ ; ν₀] := idY₁.entropy_eq.symm have hY2 : H[Y' ; ν] = H[Y₂ ; ν₀] := idY₂.entropy_eq.symm have hY3 : H[Y' ; ν] = H[Y₃ ; ν₀] := idY₃.entropy_eq.symm have hnegY3 : H[Y₃ ; ν₀] = H[-Y₃ ; ν₀] := (entropy_neg mY₃).symm have hX1Y1 : H[⟨X₁, Y₁⟩; ν₀] = H[X'; ν] + H[Y'; ν] := hX1.symm ▸ hY1.symm ▸ (entropy_pair_eq_add mX₁ mY₁).mpr iX₁Y₁ have hX2Y2 : H[⟨X₂, Y₂⟩; ν₀] = H[X'; ν] + H[Y'; ν] := hX2.symm ▸ hY2.symm ▸ (entropy_pair_eq_add mX₂ mY₂).mpr iX₂Y₂ have hX3Y3 : H[⟨X₃, Y₃⟩; ν₀] = H[X'; ν] + H[Y'; ν] := hX3.symm ▸ hY3.symm ▸ (entropy_pair_eq_add mX₃ mY₃).mpr iX₃Y₃ have dX3negY3 : d[X' ; ν # -Y' ; ν] = d[X₃ ; ν₀ # -Y₃ ; ν₀] := (idX₃.rdist_eq idY₃.neg).symm have dX1Y1 : d[X' ; ν # Y' ; ν] = d[X₁ ; ν₀ # Y₁ ; ν₀] := (idX₁.rdist_eq idY₁).symm have dX1Y3 : d[X' ; ν # Y' ; ν] = d[X₁ ; ν₀ # Y₃ ; ν₀] := (idX₁.rdist_eq idY₃).symm have dX3Y2 : d[X' ; ν # Y' ; ν] = d[X₃ ; ν₀ # Y₂ ; ν₀] := (idX₃.rdist_eq idY₂).symm have meas1321 : Measurable (⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩) := (mX₁.sub mY₃).prodMk <| mX₂.prodMk mY₁ have meas321321 : Measurable (⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩) := (mX₃.sub mY₂).prodMk meas1321 have meas11 : Measurable (⟨X₁, Y₁⟩) := mX₁.prodMk mY₁ have meas22 : Measurable (⟨X₂, Y₂⟩) := mX₂.prodMk mY₂ have meas1122 : Measurable (⟨⟨X₁, Y₁⟩, ⟨X₂, Y₂⟩⟩) := meas11.prodMk meas22 have meas33 : Measurable (⟨X₃, Y₃⟩) := mX₃.prodMk mY₃ have meas1neg1 : Measurable (X₁ - Y₁) := mX₁.sub mY₁ have in1 : H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, ⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩⟩ ; ν₀] + H[X₃ + Y₃; ν₀] ≤ H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, X₃ + Y₃⟩ ; ν₀] + H[⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩ ; ν₀] := entropy_triple_add_entropy_le _ (by fun_prop) meas33 (mX₃.add mY₃) have eq2 : H[X₃ + Y₃; ν₀] = 1/2 * H[X'; ν] + 1/2 * H[Y'; ν] + d[X'; ν # -Y'; ν] := by rw [hX3, hY3, dX3negY3, hnegY3, IndepFun.rdist_eq iX₃negY₃ mX₃ mY₃.neg, sub_neg_eq_add] ring have eq3 : H[⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩ ; ν₀] = H[X'; ν] + H[Y'; ν] := hX3Y3 ▸ entropy_of_comp_eq_of_comp ν₀ (meas33 |>.prodMk <| mX₃.add mY₃) meas33 (fun ((x3, y3), _) ↦ (x3, y3)) (fun (x3, y3) ↦ ((x3, y3), x3 + y3)) rfl rfl have eq4' : X₁ =ᵐ[ν₀] X₂ - Y₂ + Y₁ := by filter_upwards [Zeq1, Zeq2] with ω hZ hZ' simp only [Pi.add_apply, ← hZ', hZ, Pi.sub_apply, sub_add_cancel] have eq4 : X₃ + Y₃ =ᵐ[ν₀] (X₃ - Y₂) - (X₁ - Y₃) + X₂ + Y₁ := by filter_upwards [eq4'] with ω h simp only [Pi.add_apply, sub_eq_add_neg, neg_add_rev, neg_neg, add_assoc, Pi.neg_apply, h, neg_add_cancel, add_zero, neg_add_cancel_comm_assoc] have eq5 : H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, X₃ + Y₃⟩ ; ν₀] = H[⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩ ; ν₀] := calc _ = 
H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, (X₃ - Y₂) - (X₁ - Y₃) + X₂ + Y₁⟩ ; ν₀] := by refine IdentDistrib.entropy_eq <| IdentDistrib.of_ae_eq (meas321321.prodMk <| mX₃.add mY₃).aemeasurable ?_ filter_upwards [eq4] with ω h simp only [Prod.mk.injEq, h, Pi.add_apply, Pi.sub_apply, and_self] _ = _ := by refine entropy_of_comp_eq_of_comp ν₀ (meas321321.prodMk <| (((mX₃.sub mY₂).sub (mX₁.sub mY₃)).add mX₂).add mY₁) meas321321 (fun ((x3y2, (x1y3, (x2, y1))), _) ↦ (x3y2, (x1y3, (x2, y1)))) (fun (x3y2, (x1y3, (x2, y1))) ↦ ((x3y2, (x1y3, (x2, y1))), x3y2 - x1y3 + x2 + y1)) rfl rfl have in6 : H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, X₃ + Y₃⟩ ; ν₀] ≤ H[X₃ - Y₂; ν₀] + H[X₁ - Y₃; ν₀] + H[X₂; ν₀] + H[Y₁; ν₀] := by rw [eq5] refine (entropy_pair_le_add ?_ meas1321 ν₀).trans ?_ · exact (mX₃.sub mY₂) simp only [add_assoc, add_le_add_iff_left] refine (entropy_pair_le_add ?_ ?_ ν₀).trans ?_ · exact (mX₁.sub mY₃) · exact (mX₂.prodMk mY₁) simp only [add_assoc, add_le_add_iff_left] exact entropy_pair_le_add mX₂ mY₁ ν₀ have eq7 : H[X₃ - Y₂; ν₀] = 1/2 * (H[X'; ν] + H[Y'; ν]) + d[X'; ν # Y'; ν] := by rw [dX3Y2, IndepFun.rdist_eq iX₃Y₂ mX₃ mY₂, hX3, hY2] ring_nf have eq8 : H[X₁ - Y₃; ν₀] = 1/2 * (H[X'; ν] + H[Y'; ν]) + d[X'; ν # Y'; ν] := by rw [dX1Y3, IndepFun.rdist_eq iX₁Y₃ mX₁ mY₃, hX1, hY3] ring_nf have eq8' : H[X₁ - Y₁; ν₀] = 1/2 * (H[X'; ν] + H[Y'; ν]) + d[X'; ν # Y'; ν] := by rw [dX1Y1, IndepFun.rdist_eq iX₁Y₁ mX₁ mY₁, hX1, hY1] ring_nf have in9 : H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, X₃ + Y₃⟩ ; ν₀] ≤ 2 * H[X'; ν] + 2 * H[Y'; ν] + 2 * d[X'; ν # Y'; ν] := by rw [eq7, eq8, ← hX2, ← hY1] at in6 ring_nf at in6 ⊢ exact in6 have in10 : H[⟨X₁, ⟨Y₁, ⟨X₂, ⟨Y₂, ⟨X₃, Y₃⟩⟩⟩⟩⟩ ; ν₀] ≤ H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, ⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩⟩ ; ν₀] := by convert entropy_comp_le ν₀ (meas321321.prodMk <| meas33.prodMk <| mX₃.add mY₃) (fun ((x3y2, (x1y3, (x2, y1))), ((x3, y3), _)) ↦ (x1y3 + y3, (y1, (x2, (x3 - x3y2, (x3, y3)))))) <;> simp only [comp_apply, Pi.sub_apply, sub_add_cancel, sub_sub_cancel] have eq11 : H[⟨X₁, ⟨Y₁, ⟨X₂, ⟨Y₂, ⟨X₃, Y₃⟩⟩⟩⟩⟩ ; ν₀] = H[⟨X₁, ⟨Y₁, X₁ - Y₁⟩⟩ ; ν₀] + H[⟨X₂, ⟨Y₂, X₂ - Y₂⟩⟩ ; ν₀] - H[X₁ - Y₁; ν₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by calc _ = H[⟨⟨X₁', Y'₁⟩, ⟨X₂', Y'₂⟩⟩ ; ν'₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by rw [← idXY₁₂XY'₁₂.entropy_eq, ← (entropy_pair_eq_add meas1122 meas33).mpr i112233] exact entropy_of_comp_eq_of_comp ν₀ (mX₁.prodMk <| mY₁.prodMk <| mX₂.prodMk <| mY₂.prodMk <| meas33) (meas1122.prodMk meas33) (fun (x1, (y1, (x2, (y2, (x3, y3))))) ↦ (((x1, y1), (x2, y2)), (x3, y3))) (fun (((x1, y1), (x2, y2)), (x3, y3)) ↦ (x1, (y1, (x2, (y2, (x3, y3)))))) rfl rfl _ = H[⟨⟨X₁', Y'₁⟩, ⟨⟨X₂', Y'₂⟩, X₁' - Y'₁⟩⟩ ; ν'₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by congr 1 exact entropy_of_comp_eq_of_comp ν'₀ (hXY'₁.prodMk hXY'₂) (hXY'₁.prodMk <| hXY'₂.prodMk <| mX₁'.sub mY'₁) (fun ((x1, y1), (x2, y2)) ↦ ((x1, y1), ((x2, y2), x1 - y1))) (fun ((x1, y1), ((x2, y2), _)) ↦ ((x1, y1), (x2, y2))) rfl rfl _ = H[⟨⟨X₁', Y'₁⟩, ⟨⟨X₂', Y'₂⟩, Z'⟩⟩ ; ν'₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by congr 1 refine IdentDistrib.entropy_eq <| IdentDistrib.of_ae_eq (hXY'₁.prodMk <| hXY'₂.prodMk <| mX₁'.sub mY'₁).aemeasurable ?_ filter_upwards [Z'eq1] with ω h simp only [Prod.mk.injEq, Pi.sub_apply, h, and_self] _ = H[⟨⟨X₁, Y₁⟩, Z⟩ ; ν₀] + H[⟨⟨X₂, Y₂⟩, Z⟩ ; ν₀] - H[Z ; ν₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by rw [ent_of_cond_indep (μ := ν'₀) hXY'₁ hXY'₂ hZ' h_condIndep, idXY₁ZXY'₁Z'.entropy_eq, idXY₂ZXY'₂Z'.entropy_eq, idZZ'.entropy_eq] _ = H[⟨⟨X₁, Y₁⟩, X₁ - Y₁⟩ ; ν₀] + H[⟨⟨X₂, Y₂⟩, X₂ - Y₂⟩ ; ν₀] - H[X₁ - Y₁ ; ν₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by rw [IdentDistrib.entropy_eq <| IdentDistrib.of_ae_eq 
mZ.aemeasurable Zeq1] congr 3 · refine IdentDistrib.entropy_eq <| IdentDistrib.of_ae_eq (((mX₁.prodMk mY₁).prodMk mZ).aemeasurable) ?_ filter_upwards [Zeq1] with ω h simp only [Prod.mk.injEq, h, Pi.sub_apply, and_self] · refine IdentDistrib.entropy_eq <| IdentDistrib.of_ae_eq ((mX₂.prodMk mY₂).prodMk mZ).aemeasurable ?_ filter_upwards [Zeq2] with ω h simp only [Prod.mk.injEq, h, Pi.sub_apply, and_self] _ = H[⟨X₁, ⟨Y₁, X₁ - Y₁⟩⟩ ; ν₀] + H[⟨X₂, ⟨Y₂, X₂ - Y₂⟩⟩ ; ν₀] - H[X₁ - Y₁; ν₀] + H[⟨X₃, Y₃⟩ ; ν₀] := by congr 3 · exact entropy_of_comp_eq_of_comp ν₀ (meas11.prodMk meas1neg1) (mX₁.prodMk <| mY₁.prodMk <| mX₁.sub mY₁) (fun ((x1, y1),x1y1) ↦ (x1, (y1, x1y1))) (fun (x1, (y1, x1y1)) ↦ ((x1, y1),x1y1)) rfl rfl · exact entropy_of_comp_eq_of_comp ν₀ (meas22.prodMk <| (mX₂).sub (mY₂)) (mX₂.prodMk <| mY₂.prodMk <| mX₂.sub mY₂) (fun ((x1, y1),x1y1) ↦ (x1, (y1, x1y1))) (fun (x1, (y1, x1y1)) ↦ ((x1, y1),x1y1)) rfl rfl have eq12_aux1 : H[⟨X₁, ⟨Y₁, X₁ - Y₁⟩⟩ ; ν₀] = H[⟨X₁, Y₁⟩ ; ν₀] := entropy_of_comp_eq_of_comp ν₀ (mX₁.prodMk <| mY₁.prodMk <| mX₁.sub mY₁) meas11 (fun (x1, (y1, _)) ↦ (x1, y1)) (fun (x1, y1) ↦ (x1, (y1, x1 - y1))) rfl rfl have eq12_aux2 : H[⟨X₂, ⟨Y₂, X₂ - Y₂⟩⟩ ; ν₀] = H[⟨X₂, Y₂⟩ ; ν₀] := entropy_of_comp_eq_of_comp ν₀ (mX₂.prodMk <| mY₂.prodMk <| mX₂.sub mY₂) meas22 (fun (x1, (y1, _)) ↦ (x1, y1)) (fun (x1, y1) ↦ (x1, (y1, x1 - y1))) rfl rfl have eq12 : H[⟨X₁, ⟨Y₁, ⟨X₂, ⟨Y₂, ⟨X₃, Y₃⟩⟩⟩⟩⟩ ; ν₀] = 5/2 * (H[X'; ν] + H[Y'; ν]) - d[X'; ν # Y'; ν] := by rw [eq11, eq8', eq12_aux1, eq12_aux2, hX1Y1, hX2Y2, hX3Y3] ring_nf suffices h : 3 * (H[X'; ν] + H[Y'; ν]) - d[X'; ν # Y'; ν] + d[X'; ν # -Y'; ν] ≤ 3 * (H[X'; ν] + H[Y'; ν]) + 2 * d[X'; ν # Y'; ν] by simp only [sub_eq_add_neg, add_assoc, add_le_add_iff_left, neg_add_le_iff_le_add] at h ring_nf at h ⊢ exact h calc _ = 5/2 * (H[X' ; ν] + H[Y' ; ν]) - d[X' ; ν # Y' ; ν] + 1/2 * (H[X' ; ν] + H[Y' ; ν]) + d[X' ; ν # -Y' ; ν] := by ring _ ≤ H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, ⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩⟩ ; ν₀] + 1/2 * (H[X' ; ν] + H[Y' ; ν]) + d[X' ; ν # -Y' ; ν] := by simp only [one_div, add_le_add_iff_right, eq12 ▸ in10] _ = H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, ⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩⟩ ; ν₀] + H[X₃ + Y₃ ; ν₀] := by simp only [one_div, eq2] ring _ ≤ H[⟨⟨X₃ - Y₂, ⟨X₁ - Y₃, ⟨X₂, Y₁⟩⟩⟩, X₃ + Y₃⟩ ; ν₀] + H[⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩ ; ν₀] := in1 _ ≤ 2 * (H[X' ; ν] + H[Y' ; ν]) + 2 * d[X' ; ν # Y' ; ν] + H[⟨⟨X₃, Y₃⟩, X₃ + Y₃⟩ ; ν₀] := by gcongr ring_nf at in9 ⊢ simp only [in9] _ = 3 * (H[X' ; ν] + H[Y' ; ν]) + 2 * d[X' ; ν # Y' ; ν] := by simp only [eq3] ring /-- If `n ≥ 0` and `X, Y₁, ..., Yₙ` are jointly independent `G`-valued random variables, then `H[Y i₀ + ∑ i ∈ s, Y i; μ] - H[Y i₀; μ] ≤ ∑ i ∈ s, (H[Y i₀ + Y i; μ] - H[Y i₀; μ])`. The spelling here is tentative. Feel free to modify it to make the proof easier, or the application easier. -/
pfr/blueprint/src/chapter/torsion.tex:39
pfr/PFR/MoreRuzsaDist.lean:107
PFR
rdist_of_sums_ge
\begin{lemma}[Lower bound on distances]\label{first-dist-sum} \lean{rdist_of_sums_ge}\leanok We have \begin{align*} d[X_1+\tilde X_2; X_2+\tilde X_1] \geq k &- \eta (d[X^0_1; X_1+\tilde X_2] - d[X^0_1; X_1]) \\& \qquad- \eta (d[X^0_2; X_2+\tilde X_1] - d[X^0_2; X_2]) \end{align*} \end{lemma} \begin{proof}\uses{distance-lower}\leanok Immediate from \Cref{distance-lower}. \end{proof}
lemma rdist_of_sums_ge : d[X₁ + X₂' # X₂ + X₁'] ≥ k - p.η * (d[p.X₀₁ # X₁ + X₂'] - d[p.X₀₁ # X₁]) - p.η * (d[p.X₀₂ # X₂ + X₁'] - d[p.X₀₂ # X₂]) := distance_ge_of_min _ h_min (hX₁.add hX₂') (hX₂.add hX₁') include h_min hX₁ hX₂ hX₁' hX₂' in /-- The distance $d[X_1|X_1+\tilde X_2; X_2|X_2+\tilde X_1]$ is at least $$ k - \eta (d[X^0_1; X_1 | X_1 + \tilde X_2] - d[X^0_1; X_1]) - \eta(d[X^0_2; X_2 | X_2 + \tilde X_1] - d[X^0_2; X_2]).$$ -/
pfr/blueprint/src/chapter/entropy_pfr.tex:91
pfr/PFR/FirstEstimate.lean:74
PFR
rdist_of_sums_ge'
\begin{lemma}[Distance between sums]\label{dist-sums} \lean{rdist_of_sums_ge'}\leanok We have $$ d[X_1+\tilde X_1; X_2+\tilde X_2] \geq k - \frac{\eta}{2} ( d[X_1; X_1] + d[X_2;X_2] ).$$ \end{lemma} \begin{proof}\uses{distance-lower, first-useful}\leanok From \Cref{distance-lower} one has \begin{align*} d[X_1+\tilde X_1; X_2+\tilde X_2] \geq k &- \eta(d[X^0_1;X_1+\tilde X_1] - d[X^0_1;X_1]) \\ &- \eta(d[X^0_2;X_2+\tilde X_2] - d[X^0_2;X_2]). \end{align*} Now \Cref{first-useful} gives $$ d[X^0_1;X_1+\tilde X_1] - d[X^0_1;X_1] \leq \tfrac{1}{2} d[X_1;X_1]$$ and $$ d[X^0_2;X_2+\tilde X_2] - d[X^0_2;X_2] \leq \tfrac{1}{2} d[X_2;X_2], $$ and the claim follows. \end{proof}
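Spelled out, the last step is the one-line estimate obtained by substituting the two bounds from \Cref{first-useful} into the lower bound from \Cref{distance-lower}: \begin{align*} d[X_1+\tilde X_1; X_2+\tilde X_2] &\geq k - \eta\bigl(d[X^0_1;X_1+\tilde X_1] - d[X^0_1;X_1]\bigr) - \eta\bigl(d[X^0_2;X_2+\tilde X_2] - d[X^0_2;X_2]\bigr) \\ &\geq k - \tfrac{\eta}{2} d[X_1;X_1] - \tfrac{\eta}{2} d[X_2;X_2]. \end{align*}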
lemma rdist_of_sums_ge' : d[X₁ + X₁' # X₂ + X₂'] ≥ k - p.η * (d[X₁ # X₁] + d[X₂ # X₂]) / 2 := by refine LE.le.ge (LE.le.trans ?_ (distance_ge_of_min p h_min (hX₁.add hX₁') (hX₂.add hX₂'))) rw [sub_sub, sub_le_sub_iff_left k, ← mul_add,mul_div_assoc] refine mul_le_mul_of_nonneg_left ?_ (by linarith [p.hη]) have h₁' := condRuzsaDist_diff_le' ℙ p.hmeas1 hX₁ hX₁' (h_indep.indepFun (show 0 ≠ 2 by decide)) have h₂' := condRuzsaDist_diff_le' ℙ p.hmeas2 hX₂ hX₂' (h_indep.indepFun (show 1 ≠ 3 by decide)) rw [h₁.entropy_eq, add_sub_cancel_right, ← (IdentDistrib.refl hX₁.aemeasurable).rdist_eq h₁] at h₁' rw [h₂.entropy_eq, add_sub_cancel_right, ← (IdentDistrib.refl hX₂.aemeasurable).rdist_eq h₂] at h₂' linarith include h_min hX₁ hX₁' hX₂ hX₂' h_indep h₁ h₂ in
pfr/blueprint/src/chapter/entropy_pfr.tex:162
pfr/PFR/SecondEstimate.lean:57
PFR
rdist_zero_eq_half_ent
\begin{lemma}[Distance from zero]\label{dist-zero} \uses{ruz-dist-def}\lean{rdist_zero_eq_half_ent}\leanok If $X$ is a $G$-valued random variable and $0$ is the random variable taking the value $0$ everywhere then \[d[X;0]=\mathbb{H}(X)/2.\] \end{lemma} \begin{proof}\leanok This is an immediate consequence of the definitions and $X-0\equiv X$ and $\mathbb{H}(0)=0$. \end{proof}
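In more detail (a short computation, using the formula $d[X;Y]=\mathbb{H}(X'-Y')-\tfrac{1}{2}\mathbb{H}(X')-\tfrac{1}{2}\mathbb{H}(Y')$ for independent copies $X',Y'$ from \Cref{ruz-dist-def}): taking $X'$ a copy of $X$ and $Y'$ the constant $0$, $$ d[X;0] = \mathbb{H}(X'-0) - \tfrac{1}{2}\mathbb{H}(X') - \tfrac{1}{2}\mathbb{H}(0) = \mathbb{H}(X) - \tfrac{1}{2}\mathbb{H}(X) - 0 = \tfrac{1}{2}\mathbb{H}(X). $$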
/-- `d[X ; 0] = H[X] / 2`. -/ lemma rdist_zero_eq_half_ent [IsFiniteMeasure μ] [IsProbabilityMeasure μ'] : d[X ; μ # fun _ ↦ 0 ; μ'] = H[X ; μ]/2 := by have aux : H[fun x => x.1 - x.2 ; Measure.prod (Measure.map X μ) (Measure.map (fun x => 0) μ')] = H[X ; μ] := by have h: Measure.map (fun x => x.1 - x.2) (Measure.prod (Measure.map X μ) (Measure.map (fun x => 0) μ')) = Measure.map X μ := by simp [MeasureTheory.Measure.map_const, MeasureTheory.Measure.prod_dirac] rw [Measure.map_map (by fun_prop) (by fun_prop)] have helper : ((fun (x : G × G) => x.1 - x.2) ∘ fun x => (x, (0 : G))) = id := by funext; simp rw [helper, Measure.map_id] simp [entropy_def, h] simp [rdist_def, entropy_const (0 : G), aux] ring
pfr/blueprint/src/chapter/distance.tex:89
pfr/PFR/ForMathlib/Entropy/RuzsaDist.lean:213
PFR
rhoMinus_of_subgroup
\begin{lemma}[Rho minus of subgroup]\label{rhominus-subgroup}\lean{rhoMinus_of_subgroup}\uses{rhominus-def}\leanok If $H$ is a finite subgroup of $G$, then $\rho^-(U_H) = \log |A| - \log \max_t |A \cap (H+t)|$. \end{lemma} \begin{proof}\leanok \uses{log-sum} For every $G$-valued random variable $T$ that is independent of $Y$, $$D_{KL}(U_H \Vert U_A+T) = \sum_{h\in H} \frac{1}{|H|}\log\frac{1/|H|}{\mathbf{P}[U_A+T=h]}\ge -\log(\mathbf{P}[U_A+T\in H]),$$ by \Cref{log-sum}. Then observe that $$-\log(\mathbf{P}[U_A+T\in H])=-\log(\mathbf{P}[U_A\in H-T])\ge -\log(\max_{t\in G} \mathbf{P}[U_A\in H+t]).$$ This proves $\ge$. To get the equality, let $t^*:=\arg\max_t |A \cap (H+t)|$ and observe that $$\rho^-(U_H)\le D_{KL}(U_H \Vert U_A+(U_H-t^*))= \log |A| - \log \max_t|A \cap (H+t)|.$$ \end{proof}
lemma rhoMinus_of_subgroup [IsProbabilityMeasure μ] {H : AddSubgroup G} {U : Ω → G} (hunif : IsUniform H U μ) {A : Finset G} (hA : A.Nonempty) (hU : Measurable U) : ρ⁻[U ; μ # A] = log (Nat.card A) - log (sSup {Nat.card (A ∩ (t +ᵥ (H : Set G)) : Set G) | t : G} : ℕ) := by apply le_antisymm _ (le_rhoMinus_of_subgroup hunif hA hU) rcases exists_card_inter_add_eq_sSup (A := A) H hA with ⟨t, ht, hpos⟩ rw [← ht] have : Nonempty (A ∩ (t +ᵥ (H : Set G)) : Set G) := (Nat.card_pos_iff.1 hpos).1 exact rhoMinus_le_of_subgroup t hunif hA .of_subtype hU /-- If $H$ is a finite subgroup of $G$, then $\rho^+(U_H) = \log |H| - \log \max_t |A \cap (H+t)|$. -/
pfr/blueprint/src/chapter/further_improvement.tex:95
pfr/PFR/RhoFunctional.lean:635
PFR
rhoMinus_of_sum
\begin{lemma}[Rho and sums]\label{rho-sums}\lean{rhoMinus_of_sum, rhoPlus_of_sum, rho_of_sum}\leanok If $X,Y$ are independent, one has $$ \rho^-(X+Y) \leq \rho^-(X)$$ $$ \rho^+(X+Y) \leq \rho^+(X) + \bbH[X+Y] - \bbH[X]$$ and $$ \rho(X+Y) \leq \rho(X) + \frac{1}{2}( \bbH[X+Y] - \bbH[X] ).$$ \end{lemma} \begin{proof}\leanok \uses{kl-sums} The first inequality follows from \Cref{kl-sums}. The second and third inequalities are direct corollaries of the first. \end{proof}
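In particular, the third inequality is obtained by averaging the first two, since $\rho = \tfrac{1}{2}(\rho^- + \rho^+)$: $$ \rho(X+Y) = \tfrac{1}{2}\bigl(\rho^-(X+Y) + \rho^+(X+Y)\bigr) \leq \tfrac{1}{2}\bigl(\rho^-(X) + \rho^+(X) + \bbH[X+Y] - \bbH[X]\bigr) = \rho(X) + \tfrac{1}{2}\bigl(\bbH[X+Y] - \bbH[X]\bigr). $$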
lemma rhoMinus_of_sum [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hY : Measurable Y) (hA : A.Nonempty) (h_indep : IndepFun X Y μ) : ρ⁻[X + Y ; μ # A] ≤ ρ⁻[X ; μ # A] := by rcases eq_zero_or_isProbabilityMeasure μ with hμ | hμ · simp [rhoMinus_zero_measure hμ] apply le_csInf (nonempty_rhoMinusSet hA) have : IsProbabilityMeasure (uniformOn (A : Set G)) := uniformOn_isProbabilityMeasure A.finite_toSet hA rintro - ⟨μ', μ'_prob, habs, rfl⟩ obtain ⟨Ω', hΩ', m, X', Y', T, U, hm, h_indep', hX', hY', hT, hU, hXX', hYY', hTμ, hU_unif⟩ := independent_copies4_nondep (X₁ := X) (X₂ := Y) (X₃ := id) (X₄ := id) hX hY measurable_id measurable_id μ μ μ' (uniformOn (A : Set G)) let _ : MeasureSpace Ω' := ⟨m⟩ have hP : (ℙ : Measure Ω') = m := rfl have hTU : IdentDistrib (T + U) (Prod.fst + Prod.snd) ℙ (μ'.prod (uniformOn (A : Set G))) := by apply IdentDistrib.add · exact hTμ.trans IdentDistrib.fst_id.symm · exact hU_unif.trans IdentDistrib.snd_id.symm · exact h_indep'.indepFun (i := 2) (j := 3) (by simp) · exact indepFun_fst_snd have hXY : IdentDistrib (X + Y) (X' + Y') μ ℙ := by apply IdentDistrib.add hXX'.symm hYY'.symm h_indep exact h_indep'.indepFun zero_ne_one have hX'TUY' : IndepFun (⟨X', T + U⟩) Y' ℙ := by have I : iIndepFun ![X', Y', T + U] m := ProbabilityTheory.iIndepFun.apply_two_last h_indep' hX' hY' hT hU (phi := fun a b ↦ a + b) (by fun_prop) exact (I.reindex_three_bac.pair_last_of_three hY' hX' (by fun_prop)).symm have I₁ : ρ⁻[X + Y ; μ # A] ≤ KL[X + Y ; μ # (T + Y') + U ; ℙ] := by apply rhoMinus_le (by fun_prop) hA _ (by fun_prop) (by fun_prop) · have : iIndepFun ![U, X', T, Y'] := h_indep'.reindex_four_dacb have : iIndepFun ![U, X', T + Y'] := this.apply_two_last (phi := fun a b ↦ a + b) hU hX' hT hY' (by fun_prop) apply this.indepFun (i := 2) (j := 0) simp · rw [hXY.map_eq] have : T + Y' + U = (T + U) + Y' := by abel rw [this] apply absolutelyContinuous_add_of_indep hX'TUY' hX' (by fun_prop) hY' rw [hTU.map_eq, hP, hXX'.map_eq] exact habs · exact isUniform_uniformOn.of_identDistrib hU_unif.symm A.measurableSet have I₂ : KL[X + Y ; μ # (T + Y') + U ; ℙ] = KL[X' + Y' # (T + U) + Y'] := by apply IdentDistrib.KLDiv_eq _ _ hXY have : T + Y' + U = T + U + Y' := by abel rw [this] apply IdentDistrib.refl fun_prop have I₃ : KL[X' + Y' # (T + U) + Y'] ≤ KL[X' # T + U] := by apply KLDiv_add_le_KLDiv_of_indep _ (by fun_prop) (by fun_prop) (by fun_prop) · rw [hTU.map_eq, hP, hXX'.map_eq] exact habs · exact hX'TUY' have I₄ : KL[X' # T + U] = KL[X ; μ # Prod.fst + Prod.snd ; μ'.prod (uniformOn (A : Set G))] := IdentDistrib.KLDiv_eq _ _ hXX' hTU exact ((I₁.trans_eq I₂).trans I₃).trans_eq I₄ /-- If $X,Y$ are independent, one has $$ \rho^+(X+Y) \leq \rho^+(X) + \bbH[X+Y] - \bbH[X]$$ -/
pfr/blueprint/src/chapter/further_improvement.tex:144
pfr/PFR/RhoFunctional.lean:767
PFR
rhoPlus_of_subgroup
\begin{corollary}[Rho plus of subgroup]\label{rhoplus-subgroup}\lean{rhoPlus_of_subgroup}\uses{rhoplus-def}\leanok If $H$ is a finite subgroup of $G$, then $\rho^+(U_H) = \log |H| - \log \max_t |A \cap (H+t)|$. \end{corollary} \begin{proof}\leanok \uses{rhominus-subgroup} Straightforward by definition and \Cref{rhominus-subgroup}. \end{proof}
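Concretely, assuming as in \Cref{rhoplus-def} that $\rho^+(X) = \rho^-(X) + \mathbb{H}(X) - \log |A|$, the computation is $$ \rho^+(U_H) = \bigl(\log |A| - \log \max_t |A \cap (H+t)|\bigr) + \log |H| - \log |A| = \log |H| - \log \max_t |A \cap (H+t)|, $$ using \Cref{rhominus-subgroup} and $\mathbb{H}(U_H) = \log |H|$.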
lemma rhoPlus_of_subgroup [IsProbabilityMeasure μ] {H : AddSubgroup G} {U : Ω → G} (hunif : IsUniform H U μ) {A : Finset G} (hA : A.Nonempty) (hU : Measurable U) : ρ⁺[U ; μ # A] = log (Nat.card H) - log (sSup {Nat.card (A ∩ (t +ᵥ (H : Set G)) : Set G) | t : G} : ℕ) := by have : H[U ; μ] = log (Nat.card H) := hunif.entropy_eq' (toFinite _) hU rw [rhoPlus, rhoMinus_of_subgroup hunif hA hU, this] abel /-- We define $\rho(X) := (\rho^+(X) + \rho^-(X))/2$. -/ noncomputable def rho (X : Ω → G) (A : Finset G) (μ : Measure Ω ) : ℝ := (ρ⁻[X ; μ # A] + ρ⁺[X ; μ # A]) / 2 @[inherit_doc rho] notation3:max "ρ[" X " ; " μ " # " A "]" => rho X A μ @[inherit_doc rho] notation3:max "ρ[" X " # " A "]" => rho X A volume
pfr/blueprint/src/chapter/further_improvement.tex:107
pfr/PFR/RhoFunctional.lean:647
PFR
rhoPlus_of_sum
\begin{lemma}[Rho and sums]\label{rho-sums}\lean{rhoMinus_of_sum, rhoPlus_of_sum, rho_of_sum}\leanok If $X,Y$ are independent, one has $$ \rho^-(X+Y) \leq \rho^-(X)$$ $$ \rho^+(X+Y) \leq \rho^+(X) + \bbH[X+Y] - \bbH[X]$$ and $$ \rho(X+Y) \leq \rho(X) + \frac{1}{2}( \bbH[X+Y] - \bbH[X] ).$$ \end{lemma} \begin{proof}\leanok \uses{kl-sums} The first inequality follows from \Cref{kl-sums}. The second and third inequalities are direct corollaries of the first. \end{proof}
lemma rhoPlus_of_sum [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hY : Measurable Y) (hA : A.Nonempty) (h_indep : IndepFun X Y μ) : ρ⁺[X + Y ; μ # A] ≤ ρ⁺[X ; μ # A] + H[X + Y ; μ] - H[X ; μ] := by simp [rhoPlus] have := rhoMinus_of_sum hX hY hA h_indep linarith /-- If $X,Y$ are independent, one has $$ \rho(X+Y) \leq \rho(X) + \frac{1}{2}( \bbH[X+Y] - \bbH[X] ).$$ -/
pfr/blueprint/src/chapter/further_improvement.tex:144
pfr/PFR/RhoFunctional.lean:827
PFR
rho_PFR_conjecture
\begin{proposition} \label{pfr-rho}\lean{rho_PFR_conjecture}\leanok For any random variables $Y_1,Y_2$, there exists a subgroup $H$ such that $$ 2\rho(U_H) \leq \rho(Y_1) + \rho(Y_2) + 8 d[Y_1;Y_2].$$ \end{proposition} \begin{proof}\leanok \uses{phi-min-exist, phi-minimizer-zero-distance,phi-min-def,rho-invariant,sym-zero} Let $X_1,X_2$ be a $\phi$-minimizer. By \Cref{phi-minimizer-zero-distance} $d[X_1;X_2]=0$, which by \Cref{phi-min-def} implies $\rho(X_1)+\rho(X_2)\le \rho(Y_1) + \rho(Y_2) + \frac{1}{\eta} d[Y_1;Y_2]$ for every $\eta<1/8$. Taking the limit as $\eta \to 1/8$ gives $\rho(X_1)+\rho(X_2)\le \rho(Y_1) + \rho(Y_2) + 8 d[Y_1;Y_2]$. By \Cref{ruzsa-triangle} and \Cref{ruzsa-nonneg} we have $d[X_1;X_1]=d[X_2;X_2]=0$, and by \Cref{sym-zero}, with $H_1:=\mathrm{Sym}[X_1]$ and $H_2:=\mathrm{Sym}[X_2]$ we have $X_1=U_{H_1}+x_1$ and $X_2=U_{H_2}+x_2$ for some $x_1,x_2$. By \Cref{rho-invariant} we get $\rho(U_{H_1})+\rho(U_{H_2})\le \rho(Y_1) + \rho(Y_2) + 8 d[Y_1;Y_2]$, and thus the claim holds for $H=H_1$ or $H=H_2$. \end{proof}
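To make the first step explicit: writing, as in \Cref{phi-min-def}, the $\phi$-functional as $\phi[X_1;X_2] = d[X_1;X_2] + \eta(\rho(X_1)+\rho(X_2))$, minimality gives $$ d[X_1;X_2] + \eta\bigl(\rho(X_1)+\rho(X_2)\bigr) \leq d[Y_1;Y_2] + \eta\bigl(\rho(Y_1)+\rho(Y_2)\bigr), $$ and since $d[X_1;X_2]=0$ this rearranges to $\rho(X_1)+\rho(X_2) \le \rho(Y_1)+\rho(Y_2)+\frac{1}{\eta} d[Y_1;Y_2]$.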
theorem rho_PFR_conjecture [MeasurableSpace G] [DiscreteMeasurableSpace G] (Y₁ Y₂ : Ω → G) (hY₁ : Measurable Y₁) (hY₂ : Measurable Y₂) (A : Finset G) (hA : A.Nonempty) : ∃ (H : Submodule (ZMod 2) G) (Ω' : Type uG) (mΩ' : MeasureSpace Ω') (U : Ω' → G), IsProbabilityMeasure (ℙ : Measure Ω') ∧ Measurable U ∧ IsUniform H U ∧ 2 * ρ[U # A] ≤ ρ[Y₁ # A] + ρ[Y₂ # A] + 8 * d[Y₁ # Y₂] := by obtain ⟨Ω', mΩ', X₁, X₂, hX₁, hX₂, hP, htau_min, hdist⟩ := phiMinimizer_exists_rdist_eq_zero hA wlog h : ρ[X₁ # A] ≤ ρ[X₂ # A] generalizing X₁ X₂ · rw [rdist_symm] at hdist exact this X₂ X₁ hX₂ hX₁ (phiMinimizes_comm htau_min) hdist (by linarith) -- use for `U` a translate of `X` to make sure that `0` is in its support. obtain ⟨x₀, h₀⟩ : ∃ x₀, ℙ (X₁⁻¹' {x₀}) ≠ 0 := by by_contra! h have A a : (ℙ : Measure Ω').map X₁ {a} = 0 := by rw [Measure.map_apply hX₁ .of_discrete] exact h _ have B : (ℙ : Measure Ω').map X₁ = 0 := by rw [← Measure.sum_smul_dirac (μ := (ℙ : Measure Ω').map X₁)] simp [A] have : IsProbabilityMeasure ((ℙ : Measure Ω').map X₁) := isProbabilityMeasure_map hX₁.aemeasurable exact IsProbabilityMeasure.ne_zero _ B have h_unif : IsUniform (symmGroup X₁ hX₁) (fun ω ↦ X₁ ω - x₀) := by have h' : d[X₁ # X₁] = 0 := by apply le_antisymm _ (rdist_nonneg hX₁ hX₁) calc d[X₁ # X₁] ≤ d[X₁ # X₂] + d[X₂ # X₁] := rdist_triangle hX₁ hX₂ hX₁ _ = 0 := by rw [hdist, rdist_symm, hdist, zero_add] exact isUniform_sub_const_of_rdist_eq_zero hX₁ h' h₀ refine ⟨AddSubgroup.toZModSubmodule 2 (symmGroup X₁ hX₁), Ω', by infer_instance, fun ω ↦ X₁ ω - x₀, by infer_instance, by fun_prop, by exact h_unif, ?_⟩ have J : d[X₁ # X₂] + (1/8) * (ρ[X₁ # A] + ρ[X₂ # A]) ≤ d[Y₁ # Y₂] + (1/8) * (ρ[Y₁ # A] + ρ[Y₂ # A]) := by have Z := le_rdist_of_phiMinimizes htau_min hY₁ hY₂ (μ₁ := ℙ) (μ₂ := ℙ) linarith rw [hdist, zero_add] at J have : ρ[fun ω ↦ X₁ ω - x₀ # A] = ρ[X₁ # A] := by simp_rw [sub_eq_add_neg, rho_of_translate hX₁ hA] linarith /-- If $|A+A| \leq K|A|$, then there exists a subgroup $H$ and $t\in G$ such that $|A \cap (H+t)| \geq K^{-4} \sqrt{|A||V|}$, and $|H|/|A|\in[K^{-8},K^8]$. -/
pfr/blueprint/src/chapter/further_improvement.tex:336
pfr/PFR/RhoFunctional.lean:1936
PFR
rho_continuous
\begin{lemma}[Rho continuous]\label{rho-cts}\lean{rho_continuous}\leanok\uses{rho-def} $\rho(X)$ depends continuously on the distribution of $X$. \end{lemma} \begin{proof} \leanok Clear from definition. \end{proof}
/-- $\rho(X)$ depends continuously on the distribution of $X$. -/ lemma rho_continuous [TopologicalSpace G] [DiscreteTopology G] [BorelSpace G] {A : Finset G} (hA : A.Nonempty) : Continuous fun μ : ProbabilityMeasure G ↦ ρ[(id : G → G) ; μ # A] := ((rhoMinus_continuous hA).add (rhoPlus_continuous hA)).div_const _
pfr/blueprint/src/chapter/further_improvement.tex:138
pfr/PFR/RhoFunctional.lean:744
PFR
rho_of_subgroup
\begin{lemma}[Rho of subgroup]\label{rho-subgroup}\uses{rho-def}\lean{rho_of_subgroup}\leanok If $H$ is a finite subgroup of $G$, and $\rho(U_H) \leq r$, then there exists $t$ such that $|A \cap (H+t)| \geq e^{-r} \sqrt{|A||H|}$, and $|H|/|A|\in[e^{-2r},e^{2r}]$. \end{lemma} \begin{proof}\leanok\uses{rhominus-subgroup, rhoplus-subgroup} The first claim is a direct corollary of \Cref{rhominus-subgroup} and \Cref{rhoplus-subgroup}. To see the second claim, observe that \Cref{rhominus-nonneg} and \Cref{rhoplus-subgroup} imply $\rho^-(U_H),\rho^+(U_H)\ge 0$. Therefore $$|H(U_A)-H(U_H)|=|\rho^+(U_H)-\rho^-(U_H)|\le \rho^-(U_H)+\rho^+(U_H)= 2\rho(U_H)\le 2r,$$ which implies the second claim. \end{proof}
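To spell out the first claim: by \Cref{rhominus-subgroup} and \Cref{rhoplus-subgroup}, $$ 2r \geq 2\rho(U_H) = \rho^-(U_H) + \rho^+(U_H) = \log |A| + \log |H| - 2 \log \max_t |A \cap (H+t)|, $$ so $\log \max_t |A \cap (H+t)| \geq \tfrac{1}{2}(\log |A| + \log |H|) - r$, which exponentiates to $\max_t |A \cap (H+t)| \geq e^{-r}\sqrt{|A||H|}$.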
lemma rho_of_subgroup [IsProbabilityMeasure μ] {H : AddSubgroup G} {U : Ω → G} (hunif : IsUniform H U μ) {A : Finset G} (hA : A.Nonempty) (hU : Measurable U) (r : ℝ) (hr : ρ[U ; μ # A] ≤ r) : ∃ t : G, exp (-r) * Nat.card A ^ (1/2 : ℝ) * Nat.card H ^ (1/2 : ℝ) ≤ Nat.card ↑(↑A ∩ (t +ᵥ (H : Set G))) ∧ Nat.card A ≤ exp (2 * r) * Nat.card H ∧ Nat.card H ≤ exp (2 * r) * Nat.card A := by have hr' : ρ[U ; μ # A] ≤ r := hr have Hpos : 0 < (Nat.card H : ℝ) := by exact_mod_cast Nat.card_pos have : Nonempty A := hA.to_subtype have Apos : 0 < (Nat.card A : ℝ) := by exact_mod_cast Nat.card_pos simp only [rho] at hr rw [rhoMinus_of_subgroup hunif hA hU, rhoPlus_of_subgroup hunif hA hU] at hr rcases exists_card_inter_add_eq_sSup (A := A) H hA with ⟨t, ht, hpos⟩ rw [← ht] at hr have Rm : 0 ≤ ρ⁻[U ; μ # A] := rhoMinus_nonneg hU have RM : 0 ≤ ρ⁺[U ; μ # A] := by rw [rhoPlus_of_subgroup hunif hA hU, ← ht, sub_nonneg] apply log_le_log (mod_cast hpos) norm_cast have : Nat.card (t +ᵥ (H : Set G) : Set G) = Nat.card H := by apply Nat.card_image_of_injective (add_right_injective t) rw [← this] exact Nat.card_mono (toFinite _) inter_subset_right have I : |log (Nat.card H) - log (Nat.card A)| ≤ 2 * r := calc |log (Nat.card H) - log (Nat.card A)| _ = |H[U ; μ] - log (Nat.card A)| := by rw [hunif.entropy_eq' (toFinite _) hU]; rfl _ = |ρ⁺[U ; μ # A] - ρ⁻[U ; μ # A]| := by congr 1; simp [rhoPlus]; abel _ ≤ ρ⁺[U ; μ # A] + ρ⁻[U ; μ # A] := (abs_sub _ _).trans_eq (by simp [abs_of_nonneg, Rm, RM]) _ = 2 * ρ[U ; μ # A] := by simp [rho]; ring _ ≤ 2 * r := by linarith refine ⟨t, ?_, ?_, ?_⟩ · have : - r + (log (Nat.card A) + log (Nat.card H)) * (1 / 2 : ℝ) ≤ log (Nat.card (A ∩ (t +ᵥ (H : Set G)) : Set G)) := by linarith have := exp_monotone this rwa [exp_add, exp_log (mod_cast hpos), exp_mul, exp_add, exp_log Hpos, exp_log Apos, mul_rpow, ← mul_assoc] at this <;> positivity · have : log (Nat.card A) ≤ 2 * r + log (Nat.card H) := by linarith [(abs_sub_le_iff.1 I).2] have := exp_monotone this rwa [exp_log Apos, exp_add, exp_log Hpos] at this · have : log (Nat.card H) ≤ 2 * r + log (Nat.card A) := by linarith [(abs_sub_le_iff.1 I).1] have := exp_monotone this rwa [exp_log Hpos, exp_add, exp_log Apos] at this /-- If $H$ is a finite subgroup of $G$, and $\rho(U_H) \leq r$, then there exists $t$ such that $|A \cap (H+t)| \geq e^{-r} \sqrt{|A||H|}$, and $|H|/|A| \in [e^{-2r}, e^{2r}]$. -/
pfr/blueprint/src/chapter/further_improvement.tex:122
pfr/PFR/RhoFunctional.lean:683
PFR
rho_of_sum
\begin{lemma}[Rho and sums]\label{rho-sums}\lean{rhoMinus_of_sum, rhoPlus_of_sum, rho_of_sum}\leanok If $X,Y$ are independent, one has $$ \rho^-(X+Y) \leq \rho^-(X)$$ $$ \rho^+(X+Y) \leq \rho^+(X) + \bbH[X+Y] - \bbH[X]$$ and $$ \rho(X+Y) \leq \rho(X) + \frac{1}{2}( \bbH[X+Y] - \bbH[X] ).$$ \end{lemma} \begin{proof}\leanok \uses{kl-sums} The first inequality follows from \Cref{kl-sums}. The second and third inequalities are direct corollaries of the first. \end{proof}
lemma rho_of_sum [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hY : Measurable Y) (hA : A.Nonempty) (h_indep : IndepFun X Y μ) : ρ[X + Y ; μ # A] ≤ ρ[X ; μ # A] + (H[X+Y ; μ] - H[X ; μ])/2 := by simp [rho, rhoPlus] have := rhoMinus_of_sum hX hY hA h_indep linarith private lemma rho_le_translate [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hA : A.Nonempty) (s : G) : ρ[(fun ω ↦ X ω + s) ; μ # A] ≤ ρ[X ; μ # A] := by have : ρ[(fun ω ↦ X ω + s) ; μ # A] ≤ ρ[X ; μ # A] + (H[fun ω ↦ X ω + s ; μ] - H[X ; μ]) / 2 := rho_of_sum (Y := fun ω ↦ s) hX measurable_const hA (indepFun_const s) have : H[fun ω ↦ X ω + s ; μ] = H[X ; μ] := entropy_add_const hX _ linarith
pfr/blueprint/src/chapter/further_improvement.tex:144
pfr/PFR/RhoFunctional.lean:837
PFR
rho_of_sum_le
\begin{lemma}[Rho and sums, symmetrized]\label{rho-sums-sym}\lean{rho_of_sum_le}\leanok If $X,Y$ are independent, then $$ \rho(X+Y) \leq \frac{1}{2}(\rho(X)+\rho(Y) + d[X;Y]).$$ \end{lemma} \begin{proof}\leanok \uses{rho-sums} Apply \Cref{rho-sums} for $(X,Y)$ and $(Y,X)$ and take their average. \end{proof}
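In more detail, \Cref{rho-sums} applied to $(X,Y)$ and to $(Y,X)$ gives $\rho(X+Y) \leq \rho(X) + \tfrac{1}{2}(\bbH[X+Y]-\bbH[X])$ and $\rho(X+Y) \leq \rho(Y) + \tfrac{1}{2}(\bbH[X+Y]-\bbH[Y])$; averaging, $$ \rho(X+Y) \leq \tfrac{1}{2}\bigl(\rho(X)+\rho(Y)\bigr) + \tfrac{1}{2}\bigl(\bbH[X+Y] - \tfrac{1}{2}\bbH[X] - \tfrac{1}{2}\bbH[Y]\bigr), $$ and for independent $X,Y$ in the $2$-torsion setting of the corresponding formalization (where $X-Y=X+Y$) the bracketed quantity $\bbH[X+Y] - \tfrac{1}{2}\bbH[X] - \tfrac{1}{2}\bbH[Y]$ is exactly $d[X;Y]$, giving the claim.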
lemma rho_of_sum_le [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hY : Measurable Y) (hA : A.Nonempty) (h_indep : IndepFun X Y μ) : ρ[X + Y ; μ # A] ≤ (ρ[X ; μ # A] + ρ[Y ; μ # A] + d[ X ; μ # Y ; μ]) / 2 := by have I : ρ[X + Y ; μ # A] ≤ ρ[X ; μ # A] + (H[X+Y ; μ] - H[X ; μ])/2 := rho_of_sum hX hY hA h_indep have J : ρ[Y + X ; μ # A] ≤ ρ[Y ; μ # A] + (H[Y+X ; μ] - H[Y ; μ ])/2 := rho_of_sum hY hX hA h_indep.symm have : Y + X = X + Y := by abel rw [this] at J have : X - Y = X + Y := ZModModule.sub_eq_add _ _ rw [h_indep.rdist_eq hX hY, sub_eq_add_neg, this] linarith /-- If $X,Y$ are independent, then $$ \rho(X | X+Y) \leq \frac{1}{2}(\rho(X)+\rho(Y) + d[X;Y]).$$ -/
pfr/blueprint/src/chapter/further_improvement.tex:190
pfr/PFR/RhoFunctional.lean:1061
PFR
rho_of_translate
\begin{lemma}[Rho invariant]\label{rho-invariant}\lean{rho_of_translate}\leanok\uses{rho-def} For any $s \in G$, $\rho(X+s) = \rho(X)$. \end{lemma} \begin{proof}\leanok\uses{kl-div-inj} Observe that by \Cref{kl-div-inj}, $$\inf_T D_{KL}(X\Vert U_A+T)=\inf_T D_{KL}(X+s\Vert U_A+T+s)=\inf_{T'} D_{KL}(X+s\Vert U_A+T').$$ \end{proof}
lemma rho_of_translate [IsZeroOrProbabilityMeasure μ] (hX : Measurable X) (hA : A.Nonempty) (s : G) : ρ[(fun ω ↦ X ω + s) ; μ # A] = ρ[X ; μ # A] := by apply le_antisymm (rho_le_translate hX hA s) convert rho_le_translate (X := fun ω ↦ X ω + s) (by fun_prop) hA (-s) (μ := μ) with ω abel -- This may not be the optimal spelling for condRho, feel free to improve /-- We define $\rho(X|Y) := \sum_y {\bf P}(Y=y) \rho(X|Y=y)$. -/ noncomputable def condRho {S : Type*} (X : Ω → G) (Y : Ω → S) (A : Finset G) (μ : Measure Ω): ℝ := ∑' s, (μ (Y ⁻¹' {s})).toReal * ρ[X ; μ[|Y ← s] # A] /-- Average of rhoMinus along the fibers-/ noncomputable def condRhoMinus {S : Type*} (X : Ω → G) (Y : Ω → S) (A : Finset G) (μ : Measure Ω) : ℝ := ∑' s, (μ (Y ⁻¹' {s})).toReal * ρ⁻[X ; μ[|Y ← s] # A] /-- Average of rhoPlus along the fibers-/ noncomputable def condRhoPlus {S : Type*} (X : Ω → G) (Y : Ω → S) (A : Finset G) (μ : Measure Ω) : ℝ := ∑' s, (μ (Y ⁻¹' {s})).toReal * ρ⁺[X ; μ[|Y ← s] # A] @[inherit_doc condRho] notation3:max "ρ[" X " | " Z " ; " μ " # " A "]" => condRho X Z A μ @[inherit_doc condRho] notation3:max "ρ[" X " | " Z " # " A "]" => condRho X Z A volume @[inherit_doc condRhoMinus] notation3:max "ρ⁻[" X " | " Z " ; " μ " # " A "]" => condRhoMinus X Z A μ @[inherit_doc condRhoPlus] notation3:max "ρ⁺[" X " | " Z " ; " μ " # " A "]" => condRhoPlus X Z A μ
pfr/blueprint/src/chapter/further_improvement.tex:132
pfr/PFR/RhoFunctional.lean:852
PFR
second_estimate
\begin{lemma}[Second estimate]\label{second-estimate} \lean{second_estimate}\leanok We have $$ I_2 \leq 2 \eta k + \frac{2 \eta (2 \eta k - I_1)}{1 - \eta}.$$ \end{lemma} \begin{proof} \uses{cor-fibre,distance-lower,first-useful,second-estimate-aux}\leanok We apply \Cref{cor-fibre}, but now with the choice \[ (Y_1,Y_2,Y_3,Y_4) := (X_2, X_1, \tilde X_2, \tilde X_1). \] Now \Cref{cor-fibre} can be rewritten as \begin{align*} &d[X_1+\tilde X_1;X_2+\tilde X_2] + d[X_1|X_1+\tilde X_1; X_2|X_2+\tilde X_2] \\ &\quad + \bbI[X_1+X_2 : X_1 + \tilde X_1 \,|\, X_1+X_2+\tilde X_1+\tilde X_2] = 2k, \end{align*} recalling once again that $k := d[X_1;X_2]$. From \Cref{cond-distance-lower} one has \begin{align*} d[X_1|X_1+\tilde X_1; X_2|X_2+\tilde X_2] \geq k &- \eta (d[X^0_1;X_1|X_1+\tilde X_1] - d[X^0_1;X_1]) \\& - \eta (d[X^0_2;X_2|X_2+\tilde X_2] - d[X^0_2;X_2]), \end{align*} while from \Cref{first-useful} we have \[ d[X^0_1;X_1|X_1+\tilde X_1] - d[X^0_1;X_1] \leq \tfrac{1}{2} d[X_1;X_1], \] and \[ d[X^0_2;X_2|X_2+\tilde X_2] - d[X^0_2;X_2] \leq \tfrac{1}{2} d[X_2;X_2]. \] Combining all these inequalities with \Cref{dist-sums}, we have \begin{equation}\label{combined} \bbI[X_1+X_2 : X_1 + \tilde X_1 | X_1+X_2+\tilde X_1+\tilde X_2] \leq \eta ( d[X_1; X_1] + d[X_2; X_2] ). \end{equation} Together with \Cref{second-estimate-aux}, this gives the conclusion. \end{proof}
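For the record, the step leading to \eqref{combined} is $$ \bbI[X_1+X_2 : X_1 + \tilde X_1 \,|\, X_1+X_2+\tilde X_1+\tilde X_2] = 2k - d[X_1+\tilde X_1;X_2+\tilde X_2] - d[X_1|X_1+\tilde X_1; X_2|X_2+\tilde X_2] \leq \eta\bigl(d[X_1;X_1]+d[X_2;X_2]\bigr), $$ since both subtracted distances are at least $k - \tfrac{\eta}{2}(d[X_1;X_1]+d[X_2;X_2])$, the first by \Cref{dist-sums} and the second by combining the bounds quoted from \Cref{cond-distance-lower} and \Cref{first-useful}.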
/-- $$ I_2 \leq 2 \eta k + \frac{2 \eta (2 \eta k - I_1)}{1 - \eta}.$$ -/ lemma second_estimate : I₂ ≤ 2 * p.η * k + (2 * p.η * (2 * p.η * k - I₁)) / (1 - p.η) := by have hX₁_indep : IndepFun X₁ X₁' (μ := ℙ) := h_indep.indepFun (show 0 ≠ 2 by decide) have hX₂_indep : IndepFun X₂ X₂' (μ := ℙ) := h_indep.indepFun (show 1 ≠ 3 by decide) let Y : Fin 4 → Ω → G := ![X₂, X₁, X₂', X₁'] have hY : ∀ i, Measurable (Y i) := fun i => by fin_cases i <;> assumption have hY_indep : iIndepFun Y := by exact h_indep.reindex_four_badc have h := sum_of_rdist_eq_char_2 Y hY_indep hY rw [show Y 0 = X₂ by rfl, show Y 1 = X₁ by rfl, show Y 2 = X₂' by rfl, show Y 3 = X₁' by rfl] at h rw [← h₂.rdist_eq h₁, rdist_symm, rdist_symm (X := X₂ + X₂'), condRuzsaDist_symm (Z := X₂ + X₂') (W := X₁ + X₁') (hX₂.add hX₂') (hX₁.add hX₁'), ← two_mul] at h replace h : 2 * k = d[X₁ + X₁' # X₂ + X₂'] + d[X₁ | X₁ + X₁' # X₂ | X₂ + X₂'] + I[X₁ + X₂ : X₁ + X₁'|X₁ + X₂ + X₁' + X₂'] := by convert h using 3 <;> abel have h' := condRuzsaDistance_ge_of_min p h_min hX₁ hX₂ (X₁ + X₁') (X₂ + X₂') (hX₁.add hX₁') (hX₂.add hX₂') have h₁' := condRuzsaDist_diff_le''' ℙ p.hmeas1 hX₁ hX₁' hX₁_indep have h₂' := condRuzsaDist_diff_le''' ℙ p.hmeas2 hX₂ hX₂' hX₂_indep rw [h₁.entropy_eq, add_sub_cancel_right, ← (IdentDistrib.refl hX₁.aemeasurable).rdist_eq h₁] at h₁' rw [h₂.entropy_eq, add_sub_cancel_right, ← (IdentDistrib.refl hX₂.aemeasurable).rdist_eq h₂] at h₂' have h'' : I₂ ≤ p.η * (d[X₁ # X₁] + d[X₂ # X₂]) := by simp_rw [← add_comm X₁ X₁'] have h₁'' := mul_le_mul_of_nonneg_left h₁' (show 0 ≤ p.η by linarith [p.hη]) have h₂'' := mul_le_mul_of_nonneg_left h₂' (show 0 ≤ p.η by linarith [p.hη]) have := rdist_of_sums_ge' p _ _ _ _ hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep h_min linarith nth_rewrite 1 [mul_div_assoc, ← mul_add, mul_assoc, mul_left_comm] refine h''.trans (mul_le_mul_of_nonneg_left ?_ (show 0 ≤ p.η by linarith [p.hη])) exact second_estimate_aux p X₁ X₂ X₁' X₂' hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep h_min
pfr/blueprint/src/chapter/entropy_pfr.tex:202
pfr/PFR/SecondEstimate.lean:95
PFR
second_estimate_aux
\begin{lemma}\label{second-estimate-aux}\lean{second_estimate_aux}\leanok We have \[d[X_1;X_1] + d[X_2;X_2] \leq 2 k + \frac{2(2 \eta k - I_1)}{1-\eta}. \] \end{lemma} \begin{proof} \uses{ruz-indep, foursum-bound, dist-sums}\leanok We may use \Cref{ruz-indep} to expand \begin{align*} & d[X_1+\tilde X_1;X_2+\tilde X_2] \\ &= \bbH[X_1+\tilde X_1 + X_2 + \tilde X_2] - \tfrac{1}{2} \bbH[X_1+\tilde X_1] - \tfrac{1}{2} \bbH[X_2+\tilde X_2] \\ &= \bbH[X_1+\tilde X_1 + X_2 + \tilde X_2] - \tfrac{1}{2} \bbH[X_1] - \tfrac{1}{2} \bbH[X_2] \\ & \qquad\qquad\qquad - \tfrac{1}{2} \left( d[X_1;X_1] + d[X_2; X_2] \right), \end{align*} and hence by \Cref{foursum-bound} \[ d[X_1+\tilde X_1; X_2+\tilde X_2] \leq (2+\eta) k - \tfrac{1}{2} \left( d[X_1;X_1] + d[X_2;X_2] \right) - I_1. \] Combining this bound with \Cref{dist-sums} we obtain the result. \end{proof}
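Explicitly, writing $s := d[X_1;X_1] + d[X_2;X_2]$, the displayed bound together with \Cref{dist-sums} gives $$ k - \tfrac{\eta}{2} s \leq d[X_1+\tilde X_1; X_2+\tilde X_2] \leq (2+\eta) k - \tfrac{1}{2} s - I_1, $$ hence $\tfrac{1-\eta}{2} s \leq (1+\eta) k - I_1$, which rearranges to $s \leq 2k + \frac{2(2\eta k - I_1)}{1-\eta}$.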
lemma second_estimate_aux : d[X₁ # X₁] + d[X₂ # X₂] ≤ 2 * (k + (2 * p.η * k - I₁) / (1 - p.η)) := by have hX₁_indep : IndepFun X₁ X₁' (μ := ℙ) := h_indep.indepFun (show 0 ≠ 2 by decide) have hX₂_indep : IndepFun X₂ X₂' (μ := ℙ) := h_indep.indepFun (show 1 ≠ 3 by decide) have hX_indep : IndepFun (X₁ + X₁') (X₂ + X₂') := by exact h_indep.indepFun_add_add (ι := Fin 4) (by intro i; fin_cases i <;> assumption) 0 2 1 3 (by decide) (by decide) (by decide) (by decide) have h : d[X₁ + X₁' # X₂+ X₂'] ≤ (2 + p.η) * k - (d[X₁# X₁] + d[X₂ # X₂]) / 2 - I₁ := by have h := hX_indep.rdist_eq (hX₁.add hX₁') (hX₂.add hX₂') rw [ZModModule.sub_eq_add (X₁ + X₁') (X₂ + X₂'), ← ZModModule.sub_eq_add X₁ X₁', ← ZModModule.sub_eq_add X₂ X₂', sub_eq_iff_eq_add.mp (sub_eq_iff_eq_add.mp (hX₁_indep.rdist_eq hX₁ hX₁').symm), sub_eq_iff_eq_add.mp (sub_eq_iff_eq_add.mp (hX₂_indep.rdist_eq hX₂ hX₂').symm), ← h₁.entropy_eq, ← h₂.entropy_eq, add_assoc, add_assoc, add_halves, add_halves, ← (IdentDistrib.refl hX₁.aemeasurable).rdist_eq h₁, ← (IdentDistrib.refl hX₂.aemeasurable).rdist_eq h₂, ZModModule.sub_eq_add X₁ X₁', ZModModule.sub_eq_add X₂ X₂', ← add_assoc, add_right_comm _ X₁'] at h have h_indep' : iIndepFun ![X₁, X₂, X₂', X₁'] := by exact h_indep.reindex_four_abdc have h' := ent_ofsum_le p X₁ X₂ X₁' X₂' hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep' h_min convert (h.symm ▸ (sub_le_sub_right (sub_le_sub_right h' _) _)) using 1; ring have h' := (rdist_of_sums_ge' p X₁ X₂ X₁' X₂' hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep h_min).le.trans h rw [← div_le_iff₀' two_pos, ← sub_le_iff_le_add', le_div_iff₀ (by linarith [p.hη'])] linarith include h_min hX₁ hX₁' hX₂ hX₂' h_indep h₁ h₂ in
pfr/blueprint/src/chapter/entropy_pfr.tex:183
pfr/PFR/SecondEstimate.lean:68
PFR
single_fibres
\begin{lemma}\label{single-fibres}\lean{single_fibres}\leanok Let $\phi:G\to H$ be a homomorphism and $A,B\subseteq G$ be finite subsets. If $x,y\in H$ then let $A_x=A\cap \phi^{-1}(x)$ and $B_y=B\cap \phi^{-1}(y)$. There exist $x,y\in H$ such that $A_x,B_y$ are both non-empty and \[d[\phi(U_A);\phi(U_B)]\log \frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}\leq (\mathbb{H}(\phi(U_A))+\mathbb{H}(\phi(U_B)))(d(U_A,U_B)-d(U_{A_x},U_{B_y})).\] \end{lemma} \begin{proof} \uses{fibring-ident}\leanok The random variables $(U_A\mid \phi(U_A)=x)$ and $(U_B\mid \phi(U_B)=y)$ are equal in distribution to $U_{A_x}$ and $U_{B_y}$ respectively (both are uniformly distributed over their respective fibres). It follows from \Cref{fibring-ident} that \begin{align*} \sum_{x,y\in H}\frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A\rvert\lvert B\rvert}d[U_{A_x};U_{B_y}] &=d[U_A\mid \phi(U_A); U_B\mid \phi(U_B)]\\ &\leq d[U_A;U_B]-d[\phi(U_A);\phi(U_B)]. \end{align*} Therefore with $M:=\mathbb{H}(\phi(U_A))+\mathbb{H}(\phi(U_B))$ we have \[\left(\sum_{x,y\in H}\frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A\rvert\lvert B\rvert}Md[U_{A_x};U_{B_y}]\right)+Md[\phi(U_A);\phi(U_B)]\leq Md[U_A;U_B].\] Since \[M=\sum_{x,y\in H}\frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A\rvert\lvert B\rvert}\log \frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}\] we have \[\sum_{x,y\in H} \frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A\rvert\lvert B\rvert}\left(Md[U_{A_x};U_{B_y}]+d[\phi(U_A);\phi(U_B)]\log \frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}\right)\leq Md[U_A;U_B].\] It follows that there exists some $x,y\in H$ such that $\lvert A_x\rvert,\lvert B_y\rvert\neq 0$ and \[Md[U_{A_x};U_{B_y}]+d[\phi(U_A);\phi(U_B)]\log \frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}\leq Md[U_A;U_B].\] \end{proof}
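The displayed formula for $M$ is just the expansion of the two entropies: $\phi(U_A)$ takes the value $x$ with probability $\lvert A_x\rvert/\lvert A\rvert$, and similarly for $\phi(U_B)$, so \[ M = \sum_{x\in H}\frac{\lvert A_x\rvert}{\lvert A\rvert}\log\frac{\lvert A\rvert}{\lvert A_x\rvert} + \sum_{y\in H}\frac{\lvert B_y\rvert}{\lvert B\rvert}\log\frac{\lvert B\rvert}{\lvert B_y\rvert} = \sum_{x,y\in H}\frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A\rvert\lvert B\rvert}\log\frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}, \] where the second equality uses $\sum_x \lvert A_x\rvert = \lvert A\rvert$ and $\sum_y \lvert B_y\rvert = \lvert B\rvert$.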
lemma single_fibres {G H Ω Ω': Type*} [AddCommGroup G] [Countable G] [MeasurableSpace G] [MeasurableSingletonClass G] [AddCommGroup H] [Countable H] [MeasurableSpace H] [MeasurableSingletonClass H] [MeasureSpace Ω] [MeasureSpace Ω'] [IsProbabilityMeasure (ℙ : Measure Ω)] [IsProbabilityMeasure (ℙ : Measure Ω')] (φ : G →+ H) {A B : Set G} [Finite A] [Finite B] {UA : Ω → G} {UB : Ω' → G} (hA : A.Nonempty) (hB : B.Nonempty) (hUA': Measurable UA) (hUB': Measurable UB) (hUA : IsUniform A UA) (hUB : IsUniform B UB) (hUA_mem : ∀ ω, UA ω ∈ A) (hUB_mem : ∀ ω, UB ω ∈ B) : ∃ (x y : H) (Ax By : Set G), Ax = A ∩ φ.toFun ⁻¹' {x} ∧ By = B ∩ φ.toFun ⁻¹' {y} ∧ Ax.Nonempty ∧ By.Nonempty ∧ d[φ.toFun ∘ UA # φ.toFun ∘ UB] * log (Nat.card A * Nat.card B / ((Nat.card Ax) * (Nat.card By))) ≤ (H[φ.toFun ∘ UA] + H[φ.toFun ∘ UB]) * (d[UA # UB] - dᵤ[Ax # By]) := by have : Nonempty A := hA.to_subtype have : Nonempty B := hB.to_subtype have : FiniteRange UA := finiteRange_of_finset UA A.toFinite.toFinset (by simpa) have : FiniteRange UB := finiteRange_of_finset UB B.toFinite.toFinset (by simpa) have hUA_coe : IsUniform A.toFinite.toFinset.toSet UA := by rwa [Set.Finite.coe_toFinset] have hUB_coe : IsUniform B.toFinite.toFinset.toSet UB := by rwa [Set.Finite.coe_toFinset] let A_ (x : H) : Set G := A ∩ φ.toFun ⁻¹' {x} let B_ (y : H) : Set G := B ∩ φ.toFun ⁻¹' {y} let X : Finset H := FiniteRange.toFinset (φ.toFun ∘ UA) let Y : Finset H := FiniteRange.toFinset (φ.toFun ∘ UB) have h_Ax (x : X) : Nonempty (A_ x.val) := by obtain ⟨ω, hω⟩ := (FiniteRange.mem_iff _ _).mp x.property use UA ω; exact Set.mem_inter (hUA_mem ω) hω have h_By (y : Y) : Nonempty (B_ y.val) := by obtain ⟨ω, hω⟩ := (FiniteRange.mem_iff _ _).mp y.property use UB ω; exact Set.mem_inter (hUB_mem ω) hω have h_AX (a : A) : φ.toFun a.val ∈ X := by obtain ⟨ω, hω⟩ := hUA_coe.nonempty_preimage_of_mem hUA' (A.toFinite.mem_toFinset.mpr a.property) exact (FiniteRange.mem_iff _ (φ.toFun a.val)).mpr ⟨ω, congr_arg _ hω⟩ have h_BY (b : B) : φ.toFun b.val ∈ Y := by obtain ⟨ω, hω⟩ := hUB_coe.nonempty_preimage_of_mem hUB' (B.toFinite.mem_toFinset.mpr b.property) exact (FiniteRange.mem_iff _ (φ.toFun b.val)).mpr ⟨ω, congr_arg _ hω⟩ let φ_AX (a : A) : X := by use φ.toFun a.val; exact h_AX a let φ_BY (b : B) : Y := by use φ.toFun b.val; exact h_BY b have h_φ_AX (x : X) : A_ x.val = φ_AX ⁻¹' {x} := by ext; simp [A_, φ_AX]; simp [Subtype.ext_iff] have h_φ_BY (y : Y) : B_ y.val = φ_BY ⁻¹' {y} := by ext; simp [B_, φ_BY]; simp [Subtype.ext_iff] let p (x : H) (y : H) : ℝ := (Nat.card (A_ x).Elem) * (Nat.card (B_ y).Elem) / ((Nat.card A.Elem) * (Nat.card B.Elem)) have : ∑ x ∈ X, ∑ y ∈ Y, (p x y) * dᵤ[A_ x # B_ y] ≤ d[UA # UB] - d[φ.toFun ∘ UA # φ.toFun ∘ UB] := calc _ = d[UA | φ.toFun ∘ UA # UB | φ.toFun ∘ UB] := by rewrite [condRuzsaDist_eq_sum hUA' (.comp .of_discrete hUA') hUB' (.comp .of_discrete hUB')] refine Finset.sum_congr rfl <| fun x hx ↦ Finset.sum_congr rfl <| fun y hy ↦ ?_ have : Nonempty (A_ x) := h_Ax ⟨x, hx⟩ have : Nonempty (B_ y) := h_By ⟨y, hy⟩ let μx := (ℙ : Measure Ω)[|(φ.toFun ∘ UA) ⁻¹' {x}] have hμx : IsProbabilityMeasure μx := by apply ProbabilityTheory.cond_isProbabilityMeasure rw [Set.preimage_comp] apply hUA_coe.measure_preimage_ne_zero hUA' rw [Set.inter_comm, Set.Finite.coe_toFinset] exact .of_subtype let μy := (ℙ : Measure Ω')[|(φ.toFun ∘ UB) ⁻¹' {y}] have hμy : IsProbabilityMeasure μy := by apply ProbabilityTheory.cond_isProbabilityMeasure rw [Set.preimage_comp] apply hUB_coe.measure_preimage_ne_zero hUB' rw [Set.inter_comm, Set.Finite.coe_toFinset] 
exact .of_subtype have h_μ_unif : IsUniform (A_ x) UA μx ∧ IsUniform (B_ y) UB μy := by have : _ ∧ _ := ⟨hUA.restrict hUA' (φ.toFun ⁻¹' {x}), hUB.restrict hUB' (φ.toFun ⁻¹' {y})⟩ rwa [Set.inter_comm _ A, Set.inter_comm _ B] at this rw [setRuzsaDist_eq_rdist h_μ_unif.1 h_μ_unif.2 hUA' hUB'] show _ = (Measure.real _ (UA ⁻¹' (_ ⁻¹' _))) * (Measure.real _ (UB ⁻¹' (_ ⁻¹' _))) * _ rewrite [hUA_coe.measureReal_preimage hUA', hUB_coe.measureReal_preimage hUB'] simp_rw [p, A_, B_, IsProbabilityMeasure.measureReal_univ, one_mul] rewrite [mul_div_mul_comm, Set.inter_comm A, Set.inter_comm B] simp only [Set.Finite.coe_toFinset, Set.Finite.mem_toFinset, Finset.mem_val]; rfl _ ≤ d[UA # UB] - d[φ.toFun ∘ UA # φ.toFun ∘ UB] := by rewrite [ZeroHom.toFun_eq_coe, AddMonoidHom.toZeroHom_coe] linarith only [rdist_le_sum_fibre φ hUA' hUB' (μ := ℙ) (μ' := ℙ)] let M := H[φ.toFun ∘ UA] + H[φ.toFun ∘ UB] have hM : M = ∑ x ∈ X, ∑ y ∈ Y, Real.negMulLog (p x y) := by have h_compl {x y} (h_notin : (x, y) ∉ X ×ˢ Y) : Real.negMulLog (p x y) = 0 := by unfold p rewrite [Finset.mem_product, not_and_or] at h_notin suffices A_ x = ∅ ∨ B_ y = ∅ by obtain h | h := this <;> rw [h] <;> simp refine h_notin.imp ?_ ?_ · rw [← not_nonempty_iff_eq_empty] rintro h ⟨a, ha, rfl⟩ exact h (h_AX ⟨a, ha⟩) · rw [← not_nonempty_iff_eq_empty] rintro h ⟨a, ha, rfl⟩ exact h (h_BY ⟨a, ha⟩) unfold M unfold entropy have : IsProbabilityMeasure (.map (φ ∘ UA) ℙ) := isProbabilityMeasure_map (.comp_measurable .of_discrete hUA') have : IsProbabilityMeasure (.map (φ ∘ UB) ℙ) := isProbabilityMeasure_map (.comp_measurable .of_discrete hUB') rewrite [← Finset.sum_product', ← tsum_eq_sum fun _ ↦ h_compl, ← measureEntropy_prod] apply tsum_congr; intro; congr rewrite [← Set.singleton_prod_singleton, Measure.smul_apply, Measure.prod_prod, Measure.map_apply (.comp .of_discrete hUA') (MeasurableSet.singleton _), Measure.map_apply (.comp .of_discrete hUB') (MeasurableSet.singleton _), Set.preimage_comp, hUA_coe.measure_preimage hUA', Set.preimage_comp, hUB_coe.measure_preimage hUB'] simp [p, A_, B_, mul_div_mul_comm, Set.inter_comm, ENNReal.toReal_div] have h_sum : ∑ x ∈ X, ∑ y ∈ Y, (p x y) * (M * dᵤ[A_ x # B_ y] + d[φ.toFun ∘ UA # φ.toFun ∘ UB] * -Real.log (p x y)) ≤ M * d[UA # UB] := calc _ = ∑ x ∈ X, ∑ y ∈ Y, (p x y) * M * dᵤ[A_ x # B_ y] + M * d[φ.toFun ∘ UA # φ.toFun ∘ UB] := by simp_rw [hM, Finset.sum_mul, ← Finset.sum_add_distrib] refine Finset.sum_congr rfl <| fun _ _ ↦ Finset.sum_congr rfl <| fun _ _ ↦ ?_ simp only [negMulLog, left_distrib, mul_assoc, Finset.sum_mul] exact congrArg (HAdd.hAdd _) (by group) _ = M * ∑ x ∈ X, ∑ y ∈ Y, (p x y) * dᵤ[A_ x # B_ y] + M * d[φ.toFun ∘ UA # φ.toFun ∘ UB] := by simp_rw [Finset.mul_sum] congr; ext; congr; ext; group _ ≤ M * d[UA # UB] := by rewrite [← left_distrib] apply mul_le_mul_of_nonneg_left · linarith · unfold M linarith only [entropy_nonneg (φ.toFun ∘ UA) ℙ, entropy_nonneg (φ.toFun ∘ UB) ℙ] have : ∃ x : X, ∃ y : Y, M * dᵤ[A_ x.val # B_ y.val] + d[φ.toFun ∘ UA # φ.toFun ∘ UB] * -Real.log (p x.val y.val) ≤ M * d[UA # UB] := by let f (xy : H × H) := (p xy.1 xy.2) * (M * d[UA # UB]) let g (xy : H × H) := (p xy.1 xy.2) * (M * dᵤ[A_ xy.1 # B_ xy.2] + d[φ.toFun ∘ UA # φ.toFun ∘ UB] * -Real.log (p xy.1 xy.2)) by_contra hc; push_neg at hc replace hc : ∀ xy ∈ X ×ˢ Y, f xy < g xy := by refine fun xy h ↦ mul_lt_mul_of_pos_left ?_ ?_ · exact hc ⟨xy.1, (Finset.mem_product.mp h).1⟩ ⟨xy.2, (Finset.mem_product.mp h).2⟩ · have : Nonempty _ := h_Ax ⟨xy.1, (Finset.mem_product.mp h).1⟩ have : Nonempty _ := h_By ⟨xy.2, 
(Finset.mem_product.mp h).2⟩ simp only [p, div_pos, mul_pos, Nat.cast_pos, Nat.card_pos] have h_nonempty : Finset.Nonempty (X ×ˢ Y) := by use ⟨φ.toFun <| UA <| Classical.choice <| ProbabilityMeasure.nonempty ⟨ℙ, inferInstance⟩, φ.toFun <| UB <| Classical.choice <| ProbabilityMeasure.nonempty ⟨ℙ, inferInstance⟩⟩ exact Finset.mem_product.mpr ⟨FiniteRange.mem _ _, FiniteRange.mem _ _⟩ replace hc := Finset.sum_lt_sum_of_nonempty h_nonempty hc have h_p_one : ∑ x ∈ X ×ˢ Y, p x.1 x.2 = 1 := by simp_rw [Finset.sum_product, p, mul_div_mul_comm, ← Finset.mul_sum, ← sum_prob_preimage hA h_φ_AX, sum_prob_preimage hB h_φ_BY, mul_one] rewrite [← Finset.sum_mul, h_p_one, one_mul, Finset.sum_product] at hc exact not_le_of_gt hc h_sum obtain ⟨x, y, hxy⟩ := this refine ⟨x, y, A_ x.val, B_ y.val, rfl, rfl, .of_subtype, .of_subtype, ?_⟩ rewrite [← inv_div, Real.log_inv] show _ * -log (p x.val y.val) ≤ M * _ linarith only [hxy] variable {G : Type*} [AddCommGroup G] [Module.Free ℤ G] open Real MeasureTheory ProbabilityTheory Pointwise Set Function open QuotientAddGroup
pfr/blueprint/src/chapter/weak_pfr.tex:142
pfr/PFR/WeakPFR.lean:437
PFR
sub_condMultiDistance_le
\begin{lemma}[Lower bound on conditional multidistance]\label{cond-multidist-lower}\lean{sub_condMultiDistance_le}\leanok If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, and $k := D[(X_i)_{1 \leq i \leq m}]$, then for any other tuples $(X'_i)_{1 \leq i \leq m}$ and $(Y_i)_{1 \leq i \leq m}$ with the $X'_i$ $G$-valued, one has $$ k - D[(X'_i)_{1 \leq i \leq m} | (Y_i)_{1 \leq i \leq m}] \leq \eta \sum_{i=1}^m d[X_i; X'_i|Y_i].$$ \end{lemma} \begin{proof}\uses{multidist-lower, cond-multidist-def, cond-multidist-alt}\leanok Immediate from \Cref{multidist-lower}, \Cref{cond-multidist-alt}, and \Cref{cond-dist-def}. \end{proof}
lemma sub_condMultiDistance_le {G Ω₀ : Type u} [MeasureableFinGroup G] [MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) (Ω : Fin p.m → Type u) (hΩ : ∀ i, MeasureSpace (Ω i)) (hΩprob: ∀ i, IsProbabilityMeasure (hΩ i).volume) (X : ∀ i, Ω i → G) (hmeasX: ∀ i, Measurable (X i)) (h_min : multiTauMinimizes p Ω hΩ X) (Ω' : Fin p.m → Type u) (hΩ' : ∀ i, MeasureSpace (Ω' i)) (hΩ'prob: ∀ i, IsProbabilityMeasure (hΩ' i).volume) (X' : ∀ i, Ω' i → G) (hmeasX': ∀ i, Measurable (X' i)) {S : Type u} [Fintype S][MeasurableSpace S] [MeasurableSingletonClass S] (Y : ∀ i, Ω' i → S) (hY : ∀ i, Measurable (Y i)): D[X; hΩ] - D[X'|Y; hΩ'] ≤ p.η * ∑ i, d[X i ; (hΩ i).volume # X' i | Y i; (hΩ' i).volume ] := by set μ := fun ω: Fin p.m → S ↦ ∏ i : Fin p.m, (ℙ (Y i ⁻¹' {ω i})).toReal have probmes (i : Fin p.m) : ∑ ωi : S, (ℙ (Y i ⁻¹' {ωi})).toReal = 1 := by convert Finset.sum_toReal_measure_singleton (s := Finset.univ) (Measure.map (Y i) ℙ) with ω _ i _ · exact (MeasureTheory.Measure.map_apply (hY i) ( .singleton ω)).symm replace hΩ'prob := hΩ'prob i rw [MeasureTheory.Measure.map_apply (hY i) (Finset.measurableSet _), Finset.coe_univ, Set.preimage_univ, measure_univ, ENNReal.one_toReal] -- μ has total mass one have total : ∑ (ω : Fin p.m → S), μ ω = 1 := calc _ = ∏ i, ∑ ωi, (ℙ (Y i ⁻¹' {ωi})).toReal := by convert Finset.sum_prod_piFinset Finset.univ _ with ω _ i _ rfl _ = ∏ i, 1 := by apply Finset.prod_congr rfl intro i _ exact probmes i _ = 1 := by simp only [Finset.prod_const_one] calc _ = ∑ (ω: Fin p.m → S), μ ω * D[X; hΩ] - ∑ (ω: Fin p.m → S), μ ω * D[X' ; fun i ↦ MeasureSpace.mk ℙ[|Y i ⁻¹' {ω i}]] := by congr rw [← Finset.sum_mul, total, one_mul] _ = ∑ (ω: Fin p.m → S), μ ω * (D[X; hΩ] - D[X' ; fun i ↦ MeasureSpace.mk ℙ[|Y i ⁻¹' {ω i}]]) := by rw [← Finset.sum_sub_distrib] apply Finset.sum_congr rfl intro _ _ exact (mul_sub_left_distrib _ _ _).symm _ ≤ ∑ (ω: Fin p.m → S), μ ω * (p.η * ∑ i, d[X i ; (hΩ i).volume # X' i; ℙ[|Y i ⁻¹' {ω i}] ]) := by apply Finset.sum_le_sum intro ω _ rcases eq_or_ne (μ ω) 0 with hω | hω · simp [hω] gcongr let hΩ'_cond i := MeasureSpace.mk ℙ[|Y i ⁻¹' {ω i}] have hΩ'prob_cond i : IsProbabilityMeasure (hΩ'_cond i).volume := by refine cond_isProbabilityMeasure ?_ contrapose! 
hω apply Finset.prod_eq_zero (Finset.mem_univ i) simp only [hω, ENNReal.zero_toReal] exact sub_multiDistance_le p Ω hΩ hΩprob X hmeasX h_min Ω' hΩ'_cond hΩ'prob_cond X' hmeasX' _ = p.η * ∑ i, ∑ (ω: Fin p.m → S), μ ω * d[X i ; (hΩ i).volume # X' i; ℙ[|Y i ⁻¹' {ω i}] ] := by rw [Finset.sum_comm, Finset.mul_sum] apply Finset.sum_congr rfl intro ω _ rw [Finset.mul_sum, Finset.mul_sum, Finset.mul_sum] apply Finset.sum_congr rfl intro i _ ring _ = _ := by congr with i let f := fun j ↦ (fun ωj ↦ (ℙ (Y j ⁻¹' {ωj})).toReal * (if i=j then d[X i ; ℙ # X' i ; ℙ[|Y i ⁻¹' {ωj}]] else 1)) calc _ = ∑ ω : Fin p.m → S, ∏ j, f j (ω j) := by apply Finset.sum_congr rfl intro ω _ rw [Finset.prod_mul_distrib] congr simp only [Finset.prod_ite_eq, Finset.mem_univ, ↓reduceIte] _ = ∏ j, ∑ ωj, f j ωj := Finset.sum_prod_piFinset Finset.univ f _ = ∏ j, if i = j then d[X i # X' i | Y i] else 1 := by apply Finset.prod_congr rfl intro j _ by_cases hij : i = j · simp only [hij, mul_ite, mul_one, ↓reduceIte, f] rw [condRuzsaDist'_eq_sum' (hmeasX' i) (hY i), ← hij] simp only [mul_ite, mul_one, hij, ↓reduceIte, f] exact probmes j _ = _ := by simp only [Finset.prod_ite_eq, Finset.mem_univ, ↓reduceIte] /-- With the notation of the previous lemma, we have \begin{equation}\label{5.3-conv} k - D[ X'_{[m]} | Y_{[m]} ] \leq \eta \sum_{i=1}^m d[X_{\sigma(i)};X'_i|Y_i] \end{equation} for any permutation $\sigma : \{1,\dots,m\} \rightarrow \{1,\dots,m\}$. -/
pfr/blueprint/src/chapter/torsion.tex:340
pfr/PFR/MultiTauFunctional.lean:113
PFR
sub_condMultiDistance_le'
\begin{corollary}[Lower bound on conditional multidistance, II]\label{cond-multidist-lower-II}\lean{sub_condMultiDistance_le'}\leanok With the notation of the previous lemma, we have \begin{equation}\label{5.3-conv} k - D[ X'_{[m]} | Y_{[m]} ] \leq \eta \sum_{i=1}^m d[X_{\sigma(i)};X'_i|Y_i] \end{equation} for any permutation $\sigma : \{1,\dots,m\} \rightarrow \{1,\dots,m\}$. \end{corollary} \begin{proof}\uses{cond-multidist-lower, multidist-perm}\leanok This follows from \Cref{cond-multidist-lower} and \Cref{multidist-perm}. \end{proof}
lemma sub_condMultiDistance_le' {G Ω₀ : Type u} [MeasureableFinGroup G] [MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) (Ω : Fin p.m → Type u) (hΩ : ∀ i, MeasureSpace (Ω i)) (hΩprob: ∀ i, IsProbabilityMeasure (hΩ i).volume) (X : ∀ i, Ω i → G) (hmeasX: ∀ i, Measurable (X i)) (h_min : multiTauMinimizes p Ω hΩ X) (Ω' : Fin p.m → Type u) (hΩ' : ∀ i, MeasureSpace (Ω' i)) (hΩ'prob: ∀ i, IsProbabilityMeasure (hΩ' i).volume) (X' : ∀ i, Ω' i → G) (hmeasX': ∀ i, Measurable (X' i)) {S : Type u} [Fintype S] [MeasurableSpace S] [MeasurableSingletonClass S] (Y : ∀ i, Ω' i → S) (hY : ∀ i, Measurable (Y i)) (φ : Equiv.Perm (Fin p.m)) : D[X; hΩ] - D[X'|Y; hΩ'] ≤ p.η * ∑ i, d[X (φ i) ; (hΩ (φ i)).volume # X' i | Y i; (hΩ' i).volume ] := by let Xφ := fun i => X (φ i) let Ωφ := fun i => Ω (φ i) let hΩφ := fun i => hΩ (φ i) let hΩφprob := fun i => hΩprob (φ i) let hmeasXφ := fun i => hmeasX (φ i) calc _ = D[Xφ; hΩφ] - D[X'|Y; hΩ'] := by congr 1 rw [multiDist_of_perm hΩ hΩprob X φ] _ ≤ _ := by apply sub_condMultiDistance_le p Ωφ hΩφ hΩφprob Xφ hmeasXφ _ Ω' hΩ' hΩ'prob X' hmeasX' Y hY intro Ω'' hΩ'' X'' calc _ = multiTau p Ω hΩ X := by dsimp [multiTau] congr 1 · exact multiDist_of_perm hΩ hΩprob X φ congr 1 exact Fintype.sum_equiv φ _ _ fun _ ↦ rfl _ ≤ multiTau p Ω'' hΩ'' X'' := h_min Ω'' hΩ'' X''
pfr/blueprint/src/chapter/torsion.tex:348
pfr/PFR/MultiTauFunctional.lean:190
PFR
sub_mem_symmGroup
\begin{lemma}[Zero Ruzsa distance implies large symmetry group]\label{zero-large} \lean{sub_mem_symmGroup}\leanok If $X$ is a $G$-valued random variable such that $d[X ;X]=0$, and $x,y \in G$ are such that $P[X=x], P[X=y]>0$, then $x-y \in \mathrm{Sym}[X]$. \end{lemma} \begin{proof} \uses{ruz-indep, ruz-copy, relabeled-entropy-cond,vanish-entropy, alternative-mutual, independent-exist,sym-group-def}\leanok Let $X_1,X_2$ be independent copies of $X$ (from \Cref{independent-exist}). Let $A$ denote the range of $X$. From \Cref{ruz-indep} and \Cref{ruz-copy} we have $$ \bbH[X_1-X_2] = \bbH[X_1].$$ Observe from \Cref{relabeled-entropy-cond} that $$ \bbH[X_1-X_2|X_2] = \bbH[X_1|X_2] = \bbH[X_1]$$ and hence by \Cref{alternative-mutual} $$ \bbI[X_1-X_2 : X_1] = 0.$$ By \Cref{vanish-entropy}, $X_1-X_2$ and $X_1$ are therefore independent, thus the law of $(X_1-X_2|X_1=x)$ does not depend on $x \in A$. The claim follows. \end{proof}
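The final step in more detail: since $X_1-X_2$ is independent of $X_1$, for any $x$ with $P[X=x]>0$ one has $$ \mathrm{law}(x - X_2) = \mathrm{law}(X_1 - X_2 \,|\, X_1 = x) = \mathrm{law}(X_1 - X_2) = \mathrm{law}(X_1 - X_2 \,|\, X_1 = y) = \mathrm{law}(y - X_2), $$ and applying the map $u \mapsto x - u$ to both ends shows that $X_2$ and $X_2 + (x-y)$ have the same distribution, i.e.\ $x - y \in \mathrm{Sym}[X]$.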
lemma sub_mem_symmGroup (hX : Measurable X) (hdist : d[X # X] = 0) {x y : G} (hx : ℙ (X⁻¹' {x}) ≠ 0) (hy : ℙ (X⁻¹' {y}) ≠ 0) : x - y ∈ symmGroup X hX := by /- Consider two independent copies `X'` and `Y'` of `X`. The assumption on the Rusza distance ensures that `H[X' - Y' | Y'] = H[X' - Y']`, i.e., `X' - Y'` and `Y'` are independent. Therefore, the distribution of `X' - c` is independent of `c` for `c` in the support of `Y'`. In particular, `X' - x` and `X' - y` have the same distribution, which is equivalent to the claim of the lemma. -/ rcases ProbabilityTheory.independent_copies_two hX hX with ⟨Ω', mΩ', X', Y', hP, hX', hY', h_indep, hidX, hidY⟩ rw [hidX.symm.symmGroup_eq hX hX'] have A : H[X' - Y' | Y'] = H[X' - Y'] := calc H[X' - Y' | Y'] = H[X' | Y'] := condEntropy_sub_right hX' hY' _ = H[X'] := h_indep.condEntropy_eq_entropy hX' hY' _ = H[X' - Y'] := by have : d[X' # Y'] = 0 := by rwa [hidX.rdist_eq hidY] rw [h_indep.rdist_eq hX' hY', ← (hidX.trans hidY.symm).entropy_eq] at this linarith have I : IndepFun (X' - Y') Y' := by refine (mutualInfo_eq_zero (by fun_prop) hY').1 ?_ rw [mutualInfo_eq_entropy_sub_condEntropy (by fun_prop) hY', A, sub_self] have M : ∀ c, ℙ (Y' ⁻¹' {c}) ≠ 0 → IdentDistrib (fun ω ↦ X' ω - c) (X' - Y') := by intro c hc let F := fun ω ↦ X' ω - c refine ⟨by fun_prop, by fun_prop, ?_⟩ ext s hs rw [Measure.map_apply (by fun_prop) hs, Measure.map_apply (by fun_prop) hs] have : ℙ (F ⁻¹' s) * ℙ (Y' ⁻¹' {c}) = ℙ ((X' - Y') ⁻¹' s) * ℙ (Y' ⁻¹' {c}) := by calc ℙ (F ⁻¹' s) * ℙ (Y' ⁻¹' {c}) = ℙ (F ⁻¹' s ∩ Y' ⁻¹' {c}) := by have hFY' : IndepFun F Y' := by have : Measurable (fun z ↦ z - c) := measurable_sub_const' c apply h_indep.comp this measurable_id rw [indepFun_iff_measure_inter_preimage_eq_mul.1 hFY' _ _ hs .of_discrete] _ = ℙ ((X' - Y') ⁻¹' s ∩ Y' ⁻¹' {c}) := by congr 1; ext z; simp (config := {contextual := true}) [F] _ = ℙ ((X' - Y') ⁻¹' s) * ℙ (Y' ⁻¹' {c}) := by rw [indepFun_iff_measure_inter_preimage_eq_mul.1 I _ _ hs .of_discrete] rwa [ENNReal.mul_left_inj hc (measure_ne_top ℙ _)] at this have J : IdentDistrib (fun ω ↦ X' ω - x) (fun ω ↦ X' ω - y) := by have Px : ℙ (Y' ⁻¹' {x}) ≠ 0 := by convert hx; exact hidY.measure_mem_eq .of_discrete have Py : ℙ (Y' ⁻¹' {y}) ≠ 0 := by convert hy; exact hidY.measure_mem_eq .of_discrete exact (M x Px).trans (M y Py).symm have : IdentDistrib X' (fun ω ↦ X' ω + (x - y)) := by have : Measurable (fun c ↦ c + x) := measurable_add_const x convert J.comp this using 1 · ext ω; simp · ext ω; simp; abel exact this /-- If `d[X # X] = 0`, then `X - x₀` is the uniform distribution on the subgroup of `G` stabilizing the distribution of `X`, for any `x₀` of positive probability. -/
pfr/blueprint/src/chapter/100_percent.tex:16
pfr/PFR/HundredPercent.lean:60
PFR
sub_multiDistance_le
\begin{lemma}[Lower bound on multidistance]\label{multidist-lower}\lean{sub_multiDistance_le}\leanok If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, and $k := D[(X_i)_{1 \leq i \leq m}]$, then for any other tuple $(X'_i)_{1 \leq i \leq m}$, one has $$ k - D[(X'_i)_{1 \leq i \leq m}] \leq \eta \sum_{i=1}^m d[X_i; X'_i].$$ \end{lemma} \begin{proof}\uses{tau-min-multi, ruzsa-triangle}\leanok By \Cref{tau-min-multi} we have $$ \tau[ (X_i)_{1 \leq i \leq m}] \leq \tau[ (X'_i)_{1 \leq i \leq m}]$$ and hence by \Cref{tau-def-multi} $$ k + \eta \sum_{i=1}^m d[X_i; X^0] \leq D[(X'_i)_{1 \leq i \leq m}] + \eta \sum_{i=1}^m d[X'_i; X^0].$$ On the other hand, by \Cref{ruzsa-triangle} we have $$ d[X'_i; X^0] \leq d[X_i; X^0] + d[X_i; X'_i].$$ The claim follows. \end{proof}
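Chaining the two displays in the proof: \begin{align*} k + \eta \sum_{i=1}^m d[X_i; X^0] &\leq D[(X'_i)_{1 \leq i \leq m}] + \eta \sum_{i=1}^m d[X'_i; X^0] \\ &\leq D[(X'_i)_{1 \leq i \leq m}] + \eta \sum_{i=1}^m \bigl( d[X_i; X^0] + d[X_i; X'_i] \bigr), \end{align*} and cancelling $\eta \sum_{i=1}^m d[X_i; X^0]$ from both sides gives $k - D[(X'_i)_{1 \leq i \leq m}] \leq \eta \sum_{i=1}^m d[X_i; X'_i]$.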
lemma sub_multiDistance_le {G Ω₀ : Type u} [MeasureableFinGroup G] [hΩ₀: MeasureSpace Ω₀] (p : multiRefPackage G Ω₀) (Ω : Fin p.m → Type u) (hΩ : ∀ i, MeasureSpace (Ω i)) (hΩprob: ∀ i, IsProbabilityMeasure (hΩ i).volume) (X : ∀ i, Ω i → G) (hmeasX: ∀ i, Measurable (X i)) (h_min : multiTauMinimizes p Ω hΩ X) (Ω' : Fin p.m → Type u) (hΩ' : ∀ i, MeasureSpace (Ω' i)) (hΩprob': ∀ i, IsProbabilityMeasure (hΩ' i).volume) (X' : ∀ i, Ω' i → G) (hmeasX': ∀ i, Measurable (X' i)) : D[X; hΩ] - D[X'; hΩ'] ≤ p.η * ∑ i, d[X i ; (hΩ i).volume # X' i; (hΩ' i).volume ] := by suffices D[X; hΩ] + p.η * ∑ i, d[X i ; (hΩ i).volume # p.X₀; hΩ₀.volume ] ≤ D[X'; hΩ'] + (p.η * ∑ i, d[X i ; (hΩ i).volume # p.X₀; hΩ₀.volume ] + p.η * ∑ i, d[X i ; (hΩ i).volume # X' i; (hΩ' i).volume ]) by linarith calc _ ≤ D[X'; hΩ'] + p.η * ∑ i, d[X' i ; (hΩ' i).volume # p.X₀; hΩ₀.volume ] := h_min Ω' hΩ' X' _ ≤ _ := by have hη : p.η > 0 := p.hη have hprob := p.hprob rw [← mul_add, ← Finset.sum_add_distrib] gcongr with i _ rw [add_comm, rdist_symm (Y := X' i)] apply rdist_triangle (hmeasX' i) (hmeasX i) p.hmeas /-- If $(X_i)_{1 \leq i \leq m}$ is a $\tau$-minimizer, and $k := D[(X_i)_{1 \leq i \leq m}]$, then for any other tuples $(X'_i)_{1 \leq i \leq m}$ and $(Y_i)_{1 \leq i \leq m}$ with the $X'_i$ $G$-valued, one has $$ k - D[(X'_i)_{1 \leq i \leq m} | (Y_i)_{1 \leq i \leq m}] \leq \eta \sum_{i=1}^m d[X_i; X'_i|Y_i].$$ -/
pfr/blueprint/src/chapter/torsion.tex:300
pfr/PFR/MultiTauFunctional.lean:95
PFR
sum_condMutual_le
\begin{lemma}[Bound on conditional mutual informations]\label{uvw-s} \uses{conditional-mutual-def} \lean{I₃_eq,sum_condMutual_le}\leanok We have $$ I(U : V \, | \, S) + I(V : W \, | \,S) + I(W : U \, | \, S) \leq 6 \eta k - \frac{1 - 5 \eta}{1-\eta} (2 \eta k - I_1). $$ \end{lemma} \begin{proof} \uses{second-estimate,symm-lemma}\leanok From the definitions of $I_1,I_2$ and \Cref{symm-lemma}, we see that \[ I_1 = I(U : V \, | \, S), \qquad I_2 = I(W : U \, | \, S), \qquad I_2 = I(V : W \, | \,S). \] Applying \Cref{first-estimate} and \Cref{second-estimate} we have the inequalities \[ I_2 \leq 2 \eta k + \frac{2\eta(2 \eta k - I_1)}{1-\eta} . \] We conclude that $$ I_1 + I_2 + I_2 \leq I_1+4\eta k+ \frac{4\eta(2 \eta k - I_1)}{1-\eta} $$ and the claim follows from some calculation. \end{proof}
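The closing calculation is the algebraic identity $$ I_1 + 4 \eta k + \frac{4\eta(2 \eta k - I_1)}{1-\eta} = 6 \eta k - \Bigl(1 - \frac{4\eta}{1-\eta}\Bigr)(2 \eta k - I_1) = 6 \eta k - \frac{1 - 5 \eta}{1-\eta} (2 \eta k - I_1), $$ obtained by writing $I_1 = 2\eta k - (2\eta k - I_1)$.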
lemma sum_condMutual_le [Module (ZMod 2) G] [IsProbabilityMeasure (ℙ : Measure Ω)] : I[U : V | S] + I[V : W | S] + I[W : U | S] ≤ 6 * p.η * k - (1 - 5 * p.η) / (1 - p.η) * (2 * p.η * k - I₁) := by have : I[W : U | S] = I₂ := condMutualInfo_comm (by fun_prop) (by fun_prop) .. rw [I₃_eq, this] have h₂ := second_estimate p X₁ X₂ X₁' X₂' hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep h_min have h := add_le_add (add_le_add_left h₂ I₁) h₂ convert h using 1 have : 1 - p.η > 0 := by linarith [p.hη'] field_simp [this] ring all_goals { simpa } local notation3:max "c[" A "; " μ " # " B " ; " μ' "]" => d[p.X₀₁; ℙ # A; μ] - d[p.X₀₁ # X₁] + (d[p.X₀₂; ℙ # B; μ'] - d[p.X₀₂ # X₂]) local notation3:max "c[" A " # " B "]" => d[p.X₀₁ # A] - d[p.X₀₁ # X₁] + (d[p.X₀₂ # B] - d[p.X₀₂ # X₂]) local notation3:max "c[" A " | " B " # " C " | " D "]" => d[p.X₀₁ # A|B] - d[p.X₀₁ # X₁] + (d[p.X₀₂ # C|D] - d[p.X₀₂ # X₂]) include h_indep h₁ h₂ in
pfr/blueprint/src/chapter/entropy_pfr.tex:255
pfr/PFR/Endgame.lean:132
PFR
sum_dist_diff_le
\begin{lemma}[Bound on distance increments]\label{total-dist}\leanok \lean{sum_dist_diff_le} We have \begin{align*} \sum_{i=1}^2 \sum_{A\in\{U,V,W\}} \big(d[X^0_i;A|S] & - d[X^0_i;X_i]\big) \\ &\leq (6 - 3\eta) k + 3(2 \eta k - I_1). \end{align*} \end{lemma} \begin{proof} \uses{second-useful, foursum-bound}\leanok By \Cref{second-useful} (taking $X = X_1^0$, $Y = X_1$, $Z = X_2$ and $Z' = \tilde X_1 + \tilde X_2$, so that $Y + Z = U$ and $Y + Z + Z' = S$) we have, noting that $\bbH[Y+ Z] = \bbH[Z']$, \[ d[X^0_1;U|S] - d[X^0_1;X_1] \leq \tfrac{1}{2} (\bbH[S] - \bbH[X_1]). \] Further applications of \Cref{second-useful} give \begin{align*} d[X^0_2;U|S] - d[X^0_2; X_2] &\leq \tfrac{1}{2} (\bbH[S] - \bbH[X_2]) \\ d[X^0_1;V|S] - d[X^0_1;X_1] &\leq \tfrac{1}{2} (\bbH[S] - \bbH[X_1])\\ d[X^0_2;V|S] - d[X^0_2;X_2] &\leq \tfrac{1}{2} (\bbH[S] - \bbH[X_2]) \end{align*} and \[d[X^0_1;W|S] - d[X^0_1;X_1] \leq \tfrac{1}{2} (\bbH[S] + \bbH[W] - \bbH[X_1] - \bbH[W']),\] where $W' := X_2 + \tilde X_2$. To treat $d[X^0_2;W|S]$, first note that this equals $d[X^0_2;W'|S]$, since for a fixed choice $s$ of $S$ we have $W' = W + s$ (here we need some helper lemma about Ruzsa distance). Now we may apply \Cref{second-useful} to obtain \[d[X^0_2;W'|S] - d[X^0_2;X_2] \leq \tfrac{1}{2} (\bbH[S] + \bbH[W'] - \bbH[X_2] - \bbH[W]).\] Summing these six estimates and using \Cref{foursum-bound}, we conclude that \begin{align*} \sum_{i=1}^2 \sum_{A\in\{U,V,W\}} \big(d[X^0_i;A|S] & - d[X^0_i;X_i]\big) \\ &\leq 3 \bbH[S] - \tfrac{3}{2} \bbH[X_1] - \tfrac{3}{2} \bbH[X_2]\\ &\leq (6 - 3\eta) k + 3(2 \eta k - I_1) \end{align*} as required. \end{proof}
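The last inequality in the final chain is \Cref{foursum-bound} multiplied by three: using it in the form $\bbH[S] \leq \tfrac{1}{2}\bbH[X_1] + \tfrac{1}{2}\bbH[X_2] + (2+\eta) k - I_1$, one gets $$ 3 \bbH[S] - \tfrac{3}{2} \bbH[X_1] - \tfrac{3}{2} \bbH[X_2] \leq 3(2+\eta)k - 3 I_1 = (6 - 3\eta) k + 3(2\eta k - I_1). $$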
lemma sum_dist_diff_le [IsProbabilityMeasure (ℙ : Measure Ω)] [Module (ZMod 2) G] : c[U|S # U|S] + c[V|S # V|S] + c[W|S # W|S] ≤ (6 - 3 * p.η)*k + 3 * (2*p.η*k - I₁) := by let X₀₁ := p.X₀₁ let X₀₂ := p.X₀₂ have ineq1 : d[X₀₁ # U | S] - d[X₀₁ # X₁] ≤ (H[S ; ℙ] - H[X₁ ; ℙ])/2 := by have aux1 : H[S] + H[U] - H[X₁] - H[X₁' + X₂'] = H[S] - H[X₁] := by rw [hU X₁ X₂ X₁' X₂' h₁ h₂ h_indep]; ring have aux2 : d[X₀₁ # U | U + (X₁' + X₂')] - d[X₀₁ # X₁] ≤ (H[U + (X₁' + X₂')] + H[U] - H[X₁] - H[X₁' + X₂']) / 2 := condRuzsaDist_diff_ofsum_le ℙ (hX := p.hmeas1) (hY := hX₁) (hZ := hX₂) (Measurable.add hX₁' hX₂') (independenceCondition1 hX₁ hX₂ hX₁' hX₂' h_indep) rw [← add_assoc, aux1] at aux2 linarith [aux2] have ineq2 : d[X₀₂ # U | S] - d[X₀₂ # X₂] ≤ (H[S ; ℙ] - H[X₂ ; ℙ])/2 := by have aux1 : H[S] + H[U] - H[X₂] - H[X₁' + X₂'] = H[S] - H[X₂] := by rw [hU X₁ X₂ X₁' X₂' h₁ h₂ h_indep] ; ring have aux2 : d[X₀₂ # U | U + (X₁' + X₂')] - d[X₀₂ # X₂] ≤ (H[U + (X₁' + X₂')] + H[U] - H[X₂] - H[X₁' + X₂']) / 2 := by rw [(show U = X₂ + X₁ from add_comm _ _)] apply condRuzsaDist_diff_ofsum_le ℙ (p.hmeas2) (hX₂) (hX₁) (Measurable.add hX₁' hX₂') (independenceCondition2 hX₁ hX₂ hX₁' hX₂' h_indep) rw [← add_assoc, aux1] at aux2 linarith [aux2] have V_add_eq : V + (X₁ + X₂') = S := by abel have ineq3 : d[X₀₁ # V | S] - d[X₀₁ # X₁] ≤ (H[S ; ℙ] - H[X₁ ; ℙ])/2 := by have aux2 : d[p.X₀₁ # V | V + (X₁ + X₂')] - d[p.X₀₁ # X₁'] ≤ (H[V + (X₁ + X₂')] + H[V] - H[X₁'] - H[X₁ + X₂']) / 2 := condRuzsaDist_diff_ofsum_le ℙ (p.hmeas1) (hX₁') (hX₂) (Measurable.add hX₁ hX₂') (independenceCondition3 hX₁ hX₂ hX₁' hX₂' h_indep) have aux1 : H[S] + H[V] - H[X₁'] - H[X₁ + X₂'] = H[S ; ℙ] - H[X₁ ; ℙ] := by rw [hV X₁ X₂ X₁' X₂' h₁ h₂ h_indep, h₁.entropy_eq]; ring rw [← ProbabilityTheory.IdentDistrib.rdist_eq (IdentDistrib.refl p.hmeas1.aemeasurable) h₁, V_add_eq, aux1] at aux2 linarith [aux2] have ineq4 : d[X₀₂ # V | S] - d[X₀₂ # X₂] ≤ (H[S ; ℙ] - H[X₂ ; ℙ])/2 := by have aux2 : d[p.X₀₂ # V | V + (X₁ + X₂')] - d[p.X₀₂ # X₂] ≤ (H[V + (X₁ + X₂')] + H[V] - H[X₂] - H[X₁ + X₂']) / 2 := by rw [(show V = X₂ + X₁' from add_comm _ _)] apply condRuzsaDist_diff_ofsum_le ℙ (p.hmeas2) (hX₂) (hX₁') (Measurable.add hX₁ hX₂') (independenceCondition4 hX₁ hX₂ hX₁' hX₂' h_indep) have aux1 : H[S] + H[V] - H[X₂] - H[X₁ + X₂'] = H[S ; ℙ] - H[X₂ ; ℙ] := by rw [hV X₁ X₂ X₁' X₂' h₁ h₂ h_indep]; ring rw [V_add_eq, aux1] at aux2 linarith [aux2] let W' := X₂ + X₂' have ineq5 : d[X₀₁ # W | S] - d[X₀₁ # X₁] ≤ (H[S ; ℙ] + H[W ; ℙ] - H[X₁ ; ℙ] - H[W' ; ℙ])/2 := by have := condRuzsaDist_diff_ofsum_le ℙ p.hmeas1 hX₁ hX₁' (Measurable.add hX₂ hX₂') (independenceCondition5 hX₁ hX₂ hX₁' hX₂' h_indep) have S_eq : X₁ + X₁' + (fun a ↦ X₂ a + X₂' a) = S := by rw [(show (fun a ↦ X₂ a + X₂' a) = X₂ + X₂' by rfl), ← add_assoc, add_assoc X₁, add_comm X₁', ← add_assoc] rwa [S_eq, add_comm X₁ X₁'] at this have ineq6 : d[X₀₂ # W' | S] - d[X₀₂ # X₂] ≤ (H[S ; ℙ] + H[W' ; ℙ] - H[X₂ ; ℙ] - H[W ; ℙ])/2 := by have := condRuzsaDist_diff_ofsum_le ℙ p.hmeas2 hX₂ hX₂' (Measurable.add hX₁' hX₁) (independenceCondition6 hX₁ hX₂ hX₁' hX₂' h_indep) have S_eq : X₂ + X₂' + (fun a ↦ X₁' a + X₁ a) = S := by rw [(show (fun a ↦ X₁' a + X₁ a) = X₁' + X₁ by rfl), add_comm, ← add_assoc, add_comm X₁', add_assoc X₁, add_comm X₁', ← add_assoc] rwa [S_eq] at this have dist_eq : d[X₀₂ # W' | S] = d[X₀₂ # W | S] := by have S_eq : S = (X₂ + X₂') + (X₁' + X₁) := by rw [add_comm X₁' X₁, add_assoc _ X₂', add_comm X₂', ← add_assoc X₂, ← add_assoc X₂, add_comm X₂] rw [S_eq] apply condRuzsaDist'_of_inj_map' p.hmeas2 (hX₂.add hX₂') 
(hX₁'.add hX₁) -- Put everything together to bound the sum of the `c` terms have ineq7 : c[U|S # U|S] + c[V|S # V|S] + c[W|S # W|S] ≤ 3 * H[S ; ℙ] - 3/2 * H[X₁ ; ℙ] -3/2 * H[X₂ ; ℙ] := by have step₁ : c[U|S # U|S] ≤ H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := calc _ = (d[p.X₀₁ # U|S] - d[p.X₀₁ # X₁]) + (d[p.X₀₂ # U|S] - d[p.X₀₂ # X₂]) := by ring _ ≤ (H[S ; ℙ] - H[X₁ ; ℙ])/2 + (H[S ; ℙ] - H[X₂ ; ℙ])/2 := add_le_add ineq1 ineq2 _ = H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := by ring have step₂ : c[V|S # V|S] ≤ H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := calc c[V|S # V|S] =(d[p.X₀₁ # V|S] - d[p.X₀₁ # X₁]) + (d[p.X₀₂ # V|S] - d[p.X₀₂ # X₂]) := by ring _ ≤ (H[S ; ℙ] - H[X₁ ; ℙ])/2 + (H[S ; ℙ] - H[X₂ ; ℙ])/2 := add_le_add ineq3 ineq4 _ = H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := by ring have step₃ : c[W|S # W|S] ≤ H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := calc c[W|S # W|S] = (d[X₀₁ # W | S] - d[X₀₁ # X₁]) + (d[X₀₂ # W' | S] - d[X₀₂ # X₂]) := by rw [dist_eq] _ ≤ (H[S ; ℙ] + H[W ; ℙ] - H[X₁ ; ℙ] - H[W' ; ℙ])/2 + (H[S ; ℙ] + H[W' ; ℙ] - H[X₂ ; ℙ] - H[W ; ℙ])/2 := add_le_add ineq5 ineq6 _ = H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2 := by ring calc c[U|S # U|S] + c[V|S # V|S] + c[W|S # W|S] ≤ (H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2) + (H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2) + (H[S ; ℙ] - (H[X₁ ; ℙ] + H[X₂ ; ℙ])/2) := add_le_add (add_le_add step₁ step₂) step₃ _ = 3 * H[S ; ℙ] - 3/2 * H[X₁ ; ℙ] -3/2 * H[X₂ ; ℙ] := by ring have h_indep' : iIndepFun ![X₁, X₂, X₂', X₁'] := by refine .of_precomp (Equiv.swap (2 : Fin 4) 3).surjective ?_ convert h_indep using 1 ext x fin_cases x ; all_goals { aesop } have ineq8 : 3 * H[S ; ℙ] ≤ 3/2 * (H[X₁ ; ℙ] + H[X₂ ; ℙ]) + 3*(2+p.η)*k - 3*I₁ := calc 3 * H[S ; ℙ] ≤ 3 * (H[X₁ ; ℙ] / 2 + H[X₂ ; ℙ] / 2 + (2+p.η)*k - I₁) := by apply (mul_le_mul_left (zero_lt_three' ℝ)).mpr (ent_ofsum_le p X₁ X₂ X₁' X₂' hX₁ hX₂ hX₁' hX₂' h₁ h₂ h_indep' h_min) _ = 3/2 * ( H[X₁ ; ℙ] + H[X₂ ; ℙ]) + 3*(2+p.η)*k - 3*I₁ := by ring -- Final computation calc c[U|S # U|S] + c[V|S # V|S] + c[W|S # W|S] ≤ 3 * H[S ; ℙ] - 3/2 * H[X₁ ; ℙ] -3/2 * H[X₂ ; ℙ] := ineq7 _ = 3 * H[S ; ℙ] - (3/2 *(H[X₁ ; ℙ] + H[X₂ ; ℙ])) := by ring _ ≤ (3/2 * ( H[X₁ ; ℙ] + H[X₂ ; ℙ]) + 3*(2+p.η)*k - 3*I₁) - (3/2 *(H[X₁ ; ℙ] + H[X₂ ; ℙ])) := sub_le_sub_right ineq8 _ _ = (6 - 3 * p.η)*k + 3 * (2*p.η*k - I₁) := by ring omit [Fintype G] hG [MeasurableSingletonClass G] mΩ in
pfr/blueprint/src/chapter/entropy_pfr.tex:280
pfr/PFR/Endgame.lean:209
PFR
sum_of_conditional_distance_le
\begin{lemma}[Distance bound]\label{xi-z2-w-dist}\lean{sum_of_conditional_distance_le}\leanok We have $\sum_{i=1}^m d[X_i;Z_2|W] \leq 8(m^3-m^2) k$. \end{lemma} \begin{proof}\uses{klm-3, cond-dist-fact, mutual-w-z2, ent-z2, multidist-ruzsa-II} For each $i \in \{1,\dots, m\}$, using \Cref{klm-3} (noting the sum $Z_2$ contains $X_i$ as a summand) we have \begin{equation}\label{in-a-bit-6} d[X_i;Z_2] \leq d[X_i;X_i] + \tfrac12 (\bbH[Z_2] - \bbH[X_i]) \end{equation} and using \Cref{cond-dist-fact} we have \[ d[X_i;Z_2 | W] \leq d[X_i;Z_2] + \tfrac12 \bbI[W : Z_2]. \] Combining with~\eqref{in-a-bit-6} and \Cref{mutual-w-z2} gives \[ d[X_i;Z_2 | W] \leq d[X_i;X_i] + \tfrac12 (\bbH[Z_2] - \bbH[X_i]) + (m-1)k.\] Summing over $i$ and applying \Cref{ent-z2} gives \[ \sum_{i = 1}^m d[X_i;Z_2 | W] \leq \sum_{i = 1}^m d[X_i;X_i] + m(8m^2-16m+1) k + m(m-1) k.\] Finally, applying \Cref{multidist-ruzsa-II} (and dropping some lower order terms) gives the claim. \end{proof}
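Concretely, the two explicit terms in the penultimate display combine as \[ m(8m^2-16m+1)k + m(m-1)k = (8m^3-15m^2)k \leq 8(m^3-m^2)k, \] leaving a slack of $7m^2 k$; it is this slack that absorbs the $\sum_{i=1}^m d[X_i;X_i]$ term once that sum is bounded via \Cref{multidist-ruzsa-II}, which is the dropping of lower order terms referred to at the end of the proof.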
/-- We have $\sum_{i=1}^m d[X_i;Z_2|W] \leq 8(m^3-m^2) k$. -/ lemma sum_of_conditional_distance_le : ∑ i, d[ X i # Z2 | W] ≤ 8 * (p.m^3 - p.m^2)*k := sorry /-- Let $G$ be an abelian group, let $(T_1,T_2,T_3)$ be a $G^3$-valued random variable such that $T_1+T_2+T_3=0$ holds identically, and write \[ \delta := \bbI[T_1 : T_2] + \bbI[T_1 : T_3] + \bbI[T_2 : T_3]. \] Let $Y_1,\dots,Y_n$ be some further $G$-valued random variables and let $\alpha>0$ be a constant. Then there exists a random variable $U$ such that $$ d[U;U] + \alpha \sum_{i=1}^n d[Y_i;U] \leq \Bigl(2 + \frac{\alpha n}{2} \Bigr) \delta + \alpha \sum_{i=1}^n d[Y_i;T_2]. $$ -/
pfr/blueprint/src/chapter/torsion.tex:735
pfr/PFR/TorsionEndgame.lean:66
PFR
sum_of_rdist_eq
\begin{corollary}[Specific fibring identity]\label{cor-fibre} \lean{sum_of_rdist_eq}\leanok Let $Y_1,Y_2,Y_3$ and $Y_4$ be independent $G$-valued random variables. Then \begin{align*} & d[Y_1+Y_3; Y_2+Y_4] + d[Y_1|Y_1+Y_3; Y_2|Y_2+Y_4] \\ &\qquad + \bbI[Y_1+Y_2 : Y_2 + Y_4 | Y_1+Y_2+Y_3+Y_4] = d[Y_1; Y_2] + d[Y_3; Y_4]. \end{align*} \end{corollary} \begin{proof} \uses{fibring-ident, add-entropy, relabeled-entropy-cond}\leanok We apply \Cref{fibring-ident} with $H := G \times G$, $H' := G$, $\pi$ the addition homomorphism $\pi(x,y) := x+y$, and with the random variables $Z_1 := (Y_1,Y_3)$ and $Z_2 := (Y_2,Y_4)$. Then by independence (\Cref{add-entropy}) \[ d[Z_1; Z_2] = d[Y_1; Y_2] + d[Y_3; Y_4] \] while by definition \[ d[\pi(Z_1); \pi(Z_2)] = d[Y_1+Y_3; Y_2+Y_4]. \] Furthermore, \[ d[Z_1|\pi(Z_1); Z_2|\pi(Z_2)] = d[Y_1|Y_1+Y_3;Y_2|Y_2+Y_4], \] since $Z_1=(Y_1,Y_3)$ and $Y_1$ are linked by an invertible affine transformation once $\pi(Z_1)=Y_1+Y_3$ is fixed, and similarly for $Z_2$ and $Y_2$. (This has to do with \Cref{relabeled-entropy-cond}) Finally, we have \begin{align*} &\bbI[Z_1 + Z_2 : (\pi(Z_1),\pi(Z_2)) \,|\, \pi(Z_1) + \pi(Z_2)] \\ &\ = \bbI[(Y_1+Y_2, Y_3+Y_4) : (Y_1+Y_3, Y_2+Y_4) \,|\, Y_1+Y_2+Y_3+Y_4] \\ &\ = \bbI[Y_1+Y_2 : Y_2+Y_4 \,|\, Y_1+Y_2+Y_3+Y_4] \end{align*} where in the last line we used the fact that $(Y_1+Y_2, Y_1+Y_2+Y_3+Y_4)$ uniquely determine $Y_3+Y_4$ and similarly $(Y_2+Y_4, Y_1+Y_2+Y_3+Y_4)$ uniquely determine $Y_1+Y_3$. (This requires another helper lemma about entropy.) \end{proof}
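To spell out the first identity in the proof: since $Z_1 = (Y_1,Y_3)$ and $Z_2 = (Y_2,Y_4)$ are independent with independent coordinates, one has $d[Z_1;Z_2] = \bbH[Z_1 - Z_2] - \tfrac{1}{2}\bbH[Z_1] - \tfrac{1}{2}\bbH[Z_2]$, and hence \begin{align*} d[Z_1;Z_2] &= \bbH[Y_1-Y_2] + \bbH[Y_3-Y_4] - \tfrac{1}{2}\bigl(\bbH[Y_1]+\bbH[Y_3]\bigr) - \tfrac{1}{2}\bigl(\bbH[Y_2]+\bbH[Y_4]\bigr) \\ &= d[Y_1;Y_2] + d[Y_3;Y_4], \end{align*} using \Cref{add-entropy} to split the entropies of the pairs and of their difference.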
lemma sum_of_rdist_eq (Y : Fin 4 → Ω → G) (h_indep : iIndepFun Y μ) (h_meas : ∀ i, Measurable (Y i)) : d[Y 0; μ # Y 1; μ] + d[Y 2; μ # Y 3; μ] = d[(Y 0) - (Y 2); μ # (Y 1) - (Y 3); μ] + d[Y 0 | (Y 0) - (Y 2); μ # Y 1 | (Y 1) - (Y 3); μ] + I[(Y 0) - (Y 1) : (Y 1) - (Y 3) | (Y 0) - (Y 1) - (Y 2) + (Y 3); μ] := by let π : G × G →+ G := (AddMonoidHom.fst G G) - (AddMonoidHom.snd G G) have hπ {W_1 W_2 : Ω → G} : π ∘ ⟨W_1, W_2⟩ = W_1 - W_2 := rfl let Z_1 : Ω → G × G := ⟨Y 0, Y 2⟩ let Z_2 : Ω → G × G := ⟨Y 1, Y 3⟩ have hZ : Z_1 - Z_2 = ⟨Y 0 - Y 1, Y 2 - Y 3⟩ := rfl have m1 : Measurable Z_1 := (h_meas 0).prodMk (h_meas 2) have m2 : Measurable Z_2 := (h_meas 1).prodMk (h_meas 3) have h_indep_0 : IndepFun (Y 0) (Y 1) μ := h_indep.indepFun (by decide) have h_indep_2 : IndepFun (Y 2) (Y 3) μ := h_indep.indepFun (by decide) have h_indep_Z : IndepFun Z_1 Z_2 μ := h_indep.indepFun_prodMk_prodMk h_meas 0 2 1 3 (by decide) (by decide) (by decide) (by decide) have h_indep_sub : IndepFun (Y 0 - Y 1) (Y 2 - Y 3) μ := h_indep.indepFun_sub_sub h_meas 0 1 2 3 (by decide) (by decide) (by decide) (by decide) have msub (i j : Fin 4) : Measurable (Y i - Y j) := (h_meas i).sub (h_meas j) have h_add : d[Z_1; μ # Z_2; μ] = d[Y 0; μ # Y 1; μ] + d[Y 2; μ # Y 3; μ] := by rw [h_indep_0.rdist_eq (h_meas 0) (h_meas 1), h_indep_2.rdist_eq (h_meas 2) (h_meas 3), h_indep_Z.rdist_eq m1 m2, hZ, (entropy_pair_eq_add (h_meas 0) (h_meas 2)).2 (h_indep.indepFun (by decide)), (entropy_pair_eq_add (h_meas 1) (h_meas 3)).2 (h_indep.indepFun (by decide)), (entropy_pair_eq_add (msub 0 1) (msub 2 3)).2 h_indep_sub] ring_nf rw [← h_add, rdist_of_indep_eq_sum_fibre π h_indep_Z m1 m2] simp only [hπ, hZ] rw [sum_of_rdist_eq_step_condRuzsaDist h_indep h_meas, sum_of_rdist_eq_step_condMutualInfo h_meas] /-- Let $Y_1,Y_2,Y_3$ and $Y_4$ be independent $G$-valued random variables. Then $$d[Y_1+Y_3; Y_2+Y_4] + d[Y_1|Y_1+Y_3; Y_2|Y_2+Y_4] $$ $$ + I[Y_1+Y_2 : Y_2 + Y_4 | Y_1+Y_2+Y_3+Y_4] = d[Y_1; Y_2] + d[Y_3; Y_4].$$ -/
pfr/blueprint/src/chapter/fibring.tex:51
pfr/PFR/Fibring.lean:152
PFR
sum_of_z_eq_zero
\begin{lemma}[Zero-sum]\label{Zero-sum}\lean{sum_of_z_eq_zero}\leanok We have \begin{equation}% \label{eq:sum-zero} Z_1+Z_2+Z_3= 0 \end{equation} \end{lemma} \begin{proof}\uses{more-random}\leanok Clear from definition. \end{proof}
/-- Z_1+Z_2+Z_3= 0 -/ lemma sum_of_z_eq_zero :Z1 + Z2 + Z3 = 0 := by rw [← Finset.sum_add_distrib, ← Finset.sum_add_distrib] apply Finset.sum_eq_zero intro i _ rw [← Finset.sum_add_distrib, ← Finset.sum_add_distrib] apply Finset.sum_eq_zero intro j _ rw [← add_zsmul, ← add_zsmul] convert zero_zsmul ?_ simp
pfr/blueprint/src/chapter/torsion.tex:598
pfr/PFR/TorsionEndgame.lean:33
PFR
sum_uvw_eq_zero
\begin{lemma}[Key identity]\label{key-ident} \lean{sum_uvw_eq_zero}\leanok We have $U+V+W=0$. \end{lemma} \begin{proof} \leanok Obvious because we are in characteristic two. \end{proof}
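Spelled out, with the definitions $U = X_1+X_2$, $V = \tilde X_1+X_2$ and $W = X_1+\tilde X_1$ used in this chapter, \[ U + V + W = (X_1+X_2) + (\tilde X_1+X_2) + (X_1+\tilde X_1) = 2(X_1+X_2+\tilde X_1) = 0, \] since every element of $G$ is annihilated by $2$.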
/-- `U + V + W = 0`. -/ lemma sum_uvw_eq_zero [Module (ZMod 2) G] : U + V + W = 0 := by simp [add_assoc, add_left_comm (a := X₁), add_left_comm (a := X₂), ← ZModModule.sub_eq_add X₁', ZModModule.add_self]
pfr/blueprint/src/chapter/entropy_pfr.tex:314
pfr/PFR/Endgame.lean:326
PFR
tau_minimizer_exists
\begin{proposition}[$\tau$ has minimum]\label{tau-min}\uses{tau-min-def} \lean{tau_minimizer_exists}\leanok A pair $X_1, X_2$ of $\tau$-minimizers exists. \end{proposition} \begin{proof}\uses{tau-copy}\leanok By \Cref{tau-copy}, $\tau$ only depends on the probability distributions of $X_1, X_2$. This pair of distributions ranges over a compact space, and $\tau$ is a continuous function of it, so $\tau$ attains its minimum. \end{proof}
/-- A pair of random variables minimizing $τ$ exists. -/ lemma tau_minimizer_exists [MeasurableSingletonClass G] : ∃ (Ω : Type uG) (_ : MeasureSpace Ω) (X₁ : Ω → G) (X₂ : Ω → G), Measurable X₁ ∧ Measurable X₂ ∧ IsProbabilityMeasure (ℙ : Measure Ω) ∧ tau_minimizes p X₁ X₂ := by let μ := (tau_min_exists_measure p).choose have : IsProbabilityMeasure μ.1 := (tau_min_exists_measure p).choose_spec.1 have : IsProbabilityMeasure μ.2 := (tau_min_exists_measure p).choose_spec.2.1 have P : IsProbabilityMeasure (μ.1.prod μ.2) := by infer_instance let M : MeasureSpace (G × G) := ⟨μ.1.prod μ.2⟩ refine ⟨G × G, M, Prod.fst, Prod.snd, measurable_fst, measurable_snd, P, ?_⟩ intro ν₁ ν₂ h₁ h₂ have A : τ[@Prod.fst G G # @Prod.snd G G | p] = τ[id ; μ.1 # id ; μ.2 | p] := ProbabilityTheory.IdentDistrib.tau_eq p IdentDistrib.fst_id IdentDistrib.snd_id convert (tau_min_exists_measure p).choose_spec.2.2 ν₁ ν₂ h₁ h₂
pfr/blueprint/src/chapter/entropy_pfr.tex:36
pfr/PFR/TauFunctional.lean:141
PFR
tau_minimizer_exists_rdist_eq_zero
\begin{theorem}[Limiting improved $\tau$-decrement]\label{de-prop-lim-improv}\lean{tau_minimizer_exists_rdist_eq_zero}\leanok For $\eta = 1/8$, there exist tau-minimizers $X_1, X_2$ satisfying $d[X_1;X_2] = 0$. \end{theorem} \begin{proof}\uses{de-prop-improv, tau-min}\leanok For each $\eta<1/8$, consider minimizers $X_1^\eta$ and $X_2^\eta$ from \Cref{tau-min}. By \Cref{de-prop-improv}, they satisfy $d[X_1^\eta; X_2^\eta]=0$. By compactness of the space of probability measures on $G$, one may extract a converging subsequence of the distributions of $X_1^\eta$ and $X_2^\eta$ as $\eta \to 1/8$. By continuity of all the involved quantities, the limit is a pair of tau-minimizers for $1/8$ satisfying additionally $d[X_1;X_2] = 0$. \end{proof}
lemma tau_minimizer_exists_rdist_eq_zero : ∃ (Ω : Type uG) (_ : MeasureSpace Ω) (X₁ : Ω → G) (X₂ : Ω → G), Measurable X₁ ∧ Measurable X₂ ∧ IsProbabilityMeasure (ℙ : Measure Ω) ∧ tau_minimizes p X₁ X₂ ∧ d[X₁ # X₂] = 0 := by -- let `uₙ` be a sequence converging from below to `η`. In particular, `uₙ < 1/8`. obtain ⟨u, -, u_mem, u_lim⟩ : ∃ u, StrictMono u ∧ (∀ (n : ℕ), u n ∈ Set.Ioo 0 p.η) ∧ Tendsto u atTop (𝓝 p.η) := exists_seq_strictMono_tendsto' p.hη -- For each `n`, consider a minimizer associated to `η = uₙ`. let q : ℕ → refPackage Ω₀₁ Ω₀₂ G := fun n ↦ ⟨p.X₀₁, p.X₀₂, p.hmeas1, p.hmeas2, u n, (u_mem n).1, by linarith [(u_mem n).2, p.hη']⟩ have : ∀ n, ∃ (μ : Measure G × Measure G), IsProbabilityMeasure μ.1 ∧ IsProbabilityMeasure μ.2 ∧ ∀ (ν₁ : Measure G) (ν₂ : Measure G), IsProbabilityMeasure ν₁ → IsProbabilityMeasure ν₂ → τ[id ; μ.1 # id ; μ.2 | q n] ≤ τ[id ; ν₁ # id ; ν₂ | q n] := fun n ↦ tau_min_exists_measure (q n) choose μ μ1_prob μ2_prob hμ using this -- The minimizer associated to `uₙ` is at zero Rusza distance of itself, by -- lemma `tau_strictly_decreases'`. have I n : d[id ; (μ n).1 # id ; (μ n).2] = 0 := by let M : MeasureSpace (G × G) := ⟨(μ n).1.prod (μ n).2⟩ have : IsProbabilityMeasure ((μ n).1.prod (μ n).2) := by infer_instance have : d[@Prod.fst G G # @Prod.snd G G] = d[id ; (μ n).1 # id ; (μ n).2] := IdentDistrib.rdist_eq IdentDistrib.fst_id IdentDistrib.snd_id rw [← this] apply tau_strictly_decreases' (q n) measurable_fst measurable_snd ?_ (by linarith [(u_mem n).2, p.hη']) intro ν₁ ν₂ h₁ h₂ have A : τ[@Prod.fst G G # @Prod.snd G G | q n] = τ[id ; (μ n).1 # id ; (μ n).2 | q n] := ProbabilityTheory.IdentDistrib.tau_eq (q n) IdentDistrib.fst_id IdentDistrib.snd_id rw [A] exact hμ n _ _ h₁ h₂ -- extract a converging subsequence of the sequence of minimizers, seen as pairs of probability -- measures on `G` (which is a compact space). let μ' : ℕ → ProbabilityMeasure G × ProbabilityMeasure G := fun n ↦ (⟨(μ n).1, μ1_prob n⟩, ⟨(μ n).2, μ2_prob n⟩) let _i : TopologicalSpace G := (⊥ : TopologicalSpace G) have : DiscreteTopology G := ⟨rfl⟩ -- The limiting pair of measures will be the desired minimizer. rcases IsCompact.tendsto_subseq (x := μ') isCompact_univ (fun n ↦ mem_univ _) with ⟨ν, -, φ, φmono, hν⟩ have φlim : Tendsto φ atTop atTop := φmono.tendsto_atTop let M : MeasureSpace (G × G) := ⟨(ν.1 : Measure G).prod ν.2⟩ have P : IsProbabilityMeasure ((ν.1 : Measure G).prod (ν.2 : Measure G)) := by infer_instance refine ⟨G × G, M, Prod.fst, Prod.snd, measurable_fst, measurable_snd, P, ?_, ?_⟩ -- check that it is indeed a minimizer, as a limit of minimizers. 
· intro ν₁ ν₂ h₁ h₂ have A : τ[@Prod.fst G G # @Prod.snd G G | p] = τ[id ; ν.1 # id ; ν.2 | p] := ProbabilityTheory.IdentDistrib.tau_eq p IdentDistrib.fst_id IdentDistrib.snd_id rw [A] have L1 : Tendsto (fun n ↦ τ[id ; (μ (φ n)).1 # id ; (μ (φ n)).2 | q (φ n)]) atTop (𝓝 (τ[id ; ν.1 # id ; ν.2 | p])) := by apply Tendsto.add (Tendsto.add ?_ (Tendsto.mul (u_lim.comp φlim) ?_)) (Tendsto.mul (u_lim.comp φlim) ?_) · apply Tendsto.comp (continuous_rdist_restrict_probabilityMeasure.tendsto _) hν · have : Continuous (fun (μ : ProbabilityMeasure G × ProbabilityMeasure G) ↦ d[p.X₀₁ ; ℙ # id ; μ.1]) := Continuous.comp (continuous_rdist_restrict_probabilityMeasure₁' _ _ p.hmeas1) continuous_fst apply Tendsto.comp (this.tendsto _) hν · have : Continuous (fun (μ : ProbabilityMeasure G × ProbabilityMeasure G) ↦ d[p.X₀₂ ; ℙ # id ; μ.2]) := Continuous.comp (continuous_rdist_restrict_probabilityMeasure₁' _ _ p.hmeas2) continuous_snd apply Tendsto.comp (this.tendsto _) hν have L2 : Tendsto (fun n ↦ τ[id ; ν₁ # id ; ν₂ | q (φ n)]) atTop (𝓝 (τ[id ; ν₁ # id ; ν₂ | p])) := Tendsto.add (Tendsto.add tendsto_const_nhds (Tendsto.mul (u_lim.comp φlim) tendsto_const_nhds)) (Tendsto.mul (u_lim.comp φlim) tendsto_const_nhds) exact le_of_tendsto_of_tendsto' L1 L2 (fun n ↦ hμ (φ n) _ _ h₁ h₂) -- check that it has zero Rusza distance, as a limit of a sequence at zero Rusza distance. · have : d[@Prod.fst G G # @Prod.snd G G] = d[id ; ν.1 # id ; ν.2] := IdentDistrib.rdist_eq IdentDistrib.fst_id IdentDistrib.snd_id rw [this] have L1 : Tendsto (fun n ↦ d[id ; (μ (φ n)).1 # id ; (μ (φ n)).2]) atTop (𝓝 (d[id ; ν.1 # id ; (ν.2 : Measure G)])) := by apply Tendsto.comp (continuous_rdist_restrict_probabilityMeasure.tendsto _) hν have L2 : Tendsto (fun n ↦ d[id ; (μ (φ n)).1 # id ; (μ (φ n)).2]) atTop (𝓝 0) := by simp [I] exact tendsto_nhds_unique L1 L2 /-- `entropic_PFR_conjecture_improv`: For two $G$-valued random variables $X^0_1, X^0_2$, there is some subgroup $H \leq G$ such that $d[X^0_1;U_H] + d[X^0_2;U_H] \le 10 d[X^0_1;X^0_2]$. -/
pfr/blueprint/src/chapter/improved_exponent.tex:185
pfr/PFR/ImprovedPFR.lean:734
PFR
tau_minimizes
\begin{definition}[$\tau$-minimizer]\label{tau-min-def} \uses{tau-def} \lean{tau_minimizes}\leanok A pair of $G$-valued random variables $X_1, X_2$ are said to be a $\tau$-minimizer if one has $$\tau[X_1;X_2] \leq \tau[X'_1;X'_2] $$ for all $G$-valued random variables $X'_1, X'_2$. \end{definition}
def tau_minimizes {Ω : Type*} [MeasureSpace Ω] (X₁ : Ω → G) (X₂ : Ω → G) : Prop := ∀ (ν₁ : Measure G) (ν₂ : Measure G), IsProbabilityMeasure ν₁ → IsProbabilityMeasure ν₂ → τ[X₁ # X₂ | p] ≤ τ[id ; ν₁ # id ; ν₂ | p] omit [IsProbabilityMeasure (ℙ : Measure Ω₀₁)] [IsProbabilityMeasure (ℙ : Measure Ω₀₂)] [Fintype G] in
pfr/blueprint/src/chapter/entropy_pfr.tex:26
pfr/PFR/TauFunctional.lean:105
PFR
tau_strictly_decreases
\begin{theorem}[$\tau$-decrement]\label{de-prop} \lean{tau_strictly_decreases}\leanok Let $X_1, X_2$ be tau-minimizers. Then $d[X_1;X_2] = 0$. \end{theorem} \begin{proof} \uses{construct-good, key-ident, uvw-s, total-dist, first-estimate, eta-def}\leanok Set $k := d[X_1;X_2]$. Applying \Cref{construct-good} with any random variables $(T_1,T_2,T_3)$ such that $T_1+T_2+T_3=0$ holds identically, we deduce that \[ k \leq \delta + \frac{\eta}{3} \biggl( \delta + \sum_{i=1}^2 \sum_{j = 1}^3 (d[X^0_i;T_j] - d[X^0_i;X_i]) \biggr). \] Note that $\delta$ is still defined by~\eqref{delta-t1t2t3-def} and thus depends on $T_1,T_2,T_3$. In particular we may apply this for \[ T_1 = (U | S = s),\qquad T_2 = (V | S = s), \qquad T_3 = (W | S = s) \] for $s$ in the range of $S$ (which is a valid choice by \Cref{key-ident}) and then average over $s$ with weights $p_S(s)$, to obtain \[k \leq \tilde \delta + \frac{\eta}{3} \biggl( \tilde \delta + \sum_{i=1}^2 \sum_{A\in\{U,V,W\}} \bigl( d[X^0_i;A|S] - d[X^0_i;X_i]\bigr) \biggr),\] where \[ \tilde \delta := \bbI[U : V | S] + \bbI[V : W | S] + \bbI[W : U | S]. \] Putting this together with \Cref{uvw-s} and \Cref{total-dist}, we conclude that \begin{align*} k &\leq \Bigl(1+\frac{\eta}{3}\Bigr)\Bigl(6\eta k-\frac{1-5\eta}{1-\eta}(2\eta k-I_1)\Bigr)+\frac{\eta}{3}\Bigl((6-3\eta)k+3(2\eta k-I_1)\Bigr)\\ &= (8\eta + \eta^2) k - \biggl( \frac{1 - 5 \eta}{1-\eta}\Bigl(1 + \frac{\eta}{3}\Bigr) - \eta \biggr)(2 \eta k - I_1)\\ &\leq (8 \eta + \eta^2) k \end{align*} since the quantity $2 \eta k - I_1$ is non-negative (by \Cref{first-estimate}), and its coefficient in the above expression is non-positive provided that $\eta(2\eta + 17) \le 3$, which is certainly the case with \Cref{eta-def}. Moreover, from \Cref{eta-def} we have $8 \eta + \eta^2 < 1$. It follows that $k=0$, as desired. \end{proof}
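A minimal standalone check of the two numerical conditions invoked in the last paragraph, for the value $\eta = 1/9$ appearing in the hypothesis hpη of the Lean statement below (this sketch is not part of the repository and assumes only an import of Mathlib.Tactic):
import Mathlib.Tactic

-- with η = 1/9 the coefficient of (2ηk - I₁) is non-positive, since η(2η + 17) ≤ 3
example : (1/9 : ℝ) * (2 * (1/9) + 17) ≤ 3 := by norm_num

-- and 8η + η² < 1, so k ≤ (8η + η²)k together with k ≥ 0 forces k = 0
example : 8 * (1/9 : ℝ) + (1/9)^2 < 1 := by norm_num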
theorem tau_strictly_decreases (h_min : tau_minimizes p X₁ X₂) (hpη : p.η = 1/9) : d[X₁ # X₂] = 0 := by let ⟨A, mA, μ, Y₁, Y₂, Y₁', Y₂', hμ, h_indep, hY₁, hY₂, hY₁', hY₂', h_id1, h_id2, h_id1', h_id2'⟩ := independent_copies4_nondep hX₁ hX₂ hX₁ hX₂ ℙ ℙ ℙ ℙ rw [← h_id1.rdist_eq h_id2] let _ : MeasureSpace A := ⟨μ⟩ have : IsProbabilityMeasure (ℙ : Measure A) := hμ rw [← h_id1.tau_minimizes p h_id2] at h_min apply tau_strictly_decreases_aux p Y₁ Y₂ Y₁' Y₂' hY₁ hY₂ hY₁' hY₂' (h_id1.trans h_id1'.symm) (h_id2.trans h_id2'.symm) h_indep h_min hpη /-- `entropic_PFR_conjecture`: For two $G$-valued random variables $X^0_1, X^0_2$, there is some subgroup $H \leq G$ such that $d[X^0_1;U_H] + d[X^0_2;U_H] \le 11 d[X^0_1;X^0_2]$. -/
pfr/blueprint/src/chapter/entropy_pfr.tex:382
pfr/PFR/EntropyPFR.lean:33
PFR
tau_strictly_decreases'
\begin{theorem}[Improved $\tau$-decrement]\label{de-prop-improv}\lean{tau_strictly_decreases'}\leanok Suppose $0 < \eta < 1/8$. Let $X_1, X_2$ be tau-minimizers. Then $d[X_1;X_2] = 0$. \end{theorem} \begin{proof}\uses{averaged-construct-good, dist-diff-bound, uvw-s}\leanok From \Cref{averaged-construct-good}, \Cref{dist-diff-bound}, and \Cref{uvw-s} one has \[ k \leq 8\eta k - \frac{(1 -5 \eta - \frac{4}{6} \eta)(2 \eta k - I_1)}{(1-\eta)}.\] For any $\eta < 1/8$, we see from \Cref{first-estimate} that the expression $\frac{(1 -5 \eta - \frac{4}{6} \eta)(2 \eta k - I_1)}{(1-\eta)}$ is nonnegative, and hence $k = 0$ as required. \end{proof}
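A standalone sketch of the only arithmetic input beyond \Cref{first-estimate}: the prefactor of $2\eta k - I_1$ in the displayed inequality is nonnegative whenever $0 < \eta$ and $8\eta < 1$ (not part of the repository; it assumes only an import of Mathlib.Tactic):
import Mathlib.Tactic

-- the coefficient (1 - 5η - (4/6)η)/(1 - η) is nonnegative for 0 < η < 1/8
example (η : ℝ) (h0 : 0 < η) (h8 : 8 * η < 1) :
    0 ≤ (1 - 5 * η - 4 / 6 * η) / (1 - η) := by
  apply div_nonneg <;> linarith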
theorem tau_strictly_decreases' (hp : 8 * p.η < 1) : d[X₁ # X₂] = 0 := by let ⟨A, mA, μ, Y₁, Y₂, Y₁', Y₂', hμ, h_indep, hY₁, hY₂, hY₁', hY₂', h_id1, h_id2, h_id1', h_id2'⟩ := independent_copies4_nondep hX₁ hX₂ hX₁ hX₂ ℙ ℙ ℙ ℙ rw [← h_id1.rdist_eq h_id2] let _ : MeasureSpace A := ⟨μ⟩ have : IsProbabilityMeasure (ℙ : Measure A) := hμ rw [← h_id1.tau_minimizes p h_id2] at h_min exact tau_strictly_decreases_aux' p hY₁ hY₂ hY₁' hY₂' (h_id1.trans h_id1'.symm) (h_id2.trans h_id2'.symm) h_indep.reindex_four_abdc h_min hp end MainEstimates
pfr/blueprint/src/chapter/improved_exponent.tex:174
pfr/PFR/ImprovedPFR.lean:704
PFR
torsion_PFR
\begin{theorem}[PFR]\label{pfr-torsion}\lean{torsion_PFR}\leanok Suppose that $G$ is a finite abelian group of torsion $m$. If $A \subset G$ is non-empty and $|A+A| \leq K|A|$, then $A$ can be covered by at most $mK^{96m^3+2}$ translates of a subgroup $H$ of $G$ with $|H| \leq |A|$. \end{theorem} \begin{proof}\uses{pfr_aux_torsion}\leanok Repeat the proof of \Cref{pfr}, but with \Cref{pfr_aux_torsion} in place of \Cref{pfr_aux}. \end{proof}
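In the case $|H| \leq |A|$, which the Lean proof handles first, the exponent bookkeeping is \[ |c| \leq K^{64m^3+2}\,|A|^{1/2}\,|H|^{-1/2} \leq K^{64m^3+2}\bigl(K^{64m^3}|H|\bigr)^{1/2}|H|^{-1/2} = K^{96m^3+2} < mK^{96m^3+2}, \] using the bound $|A| \leq K^{64m^3}|H|$ from \Cref{pfr_aux_torsion} (in the form used by its Lean statement) and $1 < m$ for the final strict inequality.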
theorem torsion_PFR {G : Type*} [AddCommGroup G] [Fintype G] {m:ℕ} (hm: m ≥ 2) (htorsion : ∀ x:G, m • x = 0) {A : Set G} [Finite A] {K : ℝ} (h₀A : A.Nonempty) (hA : Nat.card (A + A) ≤ K * Nat.card A) : ∃ (H : AddSubgroup G) (c : Set G), Nat.card c < m * K ^ (96*m^3+2) ∧ Nat.card H ≤ Nat.card A ∧ A ⊆ c + H := by obtain ⟨A_pos, -, K_pos⟩ : (0 : ℝ) < Nat.card A ∧ (0 : ℝ) < Nat.card (A + A) ∧ 0 < K := PFR_conjecture_pos_aux' h₀A hA -- consider the subgroup `H` given by Lemma `torsion_PFR_conjecture_aux`. obtain ⟨H, c, hc, IHA, IAH, A_subs_cH⟩ : ∃ (H : AddSubgroup G) (c : Set G), Nat.card c ≤ K ^ (64 * m^3+2) * Nat.card A ^ (1/2) * Nat.card H ^ (-1/2) ∧ Nat.card H ≤ K ^ (64*m^3) * Nat.card A ∧ Nat.card A ≤ K ^ (64*m^3) * Nat.card H ∧ A ⊆ c + H := torsion_PFR_conjecture_aux hm htorsion h₀A hA have H_pos : (0 : ℝ) < Nat.card H := by have : 0 < Nat.card H := Nat.card_pos; positivity rcases le_or_lt (Nat.card H) (Nat.card A) with h|h -- If `#H ≤ #A`, then `H` satisfies the conclusion of the theorem · refine ⟨H, c, ?_, h, A_subs_cH⟩ calc Nat.card c ≤ K ^ ((64*m^3+2)) * Nat.card A ^ (1/2) * Nat.card H ^ (-1/2) := hc _ ≤ K ^ ((64*m^3+2)) * (K ^ (64*m^3) * Nat.card H) ^ (1/2) * Nat.card H ^ (-1/2) := by gcongr _ = K ^ (96*m^3+2) := by rpow_ring; norm_num; congr 1; ring _ < m * K ^ (96*m^3+2) := by apply (lt_mul_iff_one_lt_left _).mpr · norm_num; linarith [hm] positivity -- otherwise, we decompose `H` into cosets of one of its subgroups `H'`, chosen so that -- `#A / m < #H' ≤ #A`. This `H'` satisfies the desired conclusion. · obtain ⟨H', IH'A, IAH', H'H⟩ : ∃ H' : AddSubgroup G, Nat.card H' ≤ Nat.card A ∧ Nat.card A < m * Nat.card H' ∧ H' ≤ H := by have A_pos' : 0 < Nat.card A := mod_cast A_pos exact torsion_exists_subgroup_subset_card_le hm htorsion H h.le A_pos'.ne' have : (Nat.card A / m : ℝ) < Nat.card H' := by rw [div_lt_iff₀, mul_comm] · norm_cast norm_cast; exact Nat.zero_lt_of_lt hm have H'_pos : (0 : ℝ) < Nat.card H' := by have : 0 < Nat.card H' := Nat.card_pos; positivity obtain ⟨u, HH'u, hu⟩ := AddSubgroup.exists_left_transversal_of_le H'H refine ⟨H', c + u, ?_, IH'A, by rwa [add_assoc, HH'u]⟩ calc (Nat.card (c + u) : ℝ) ≤ Nat.card c * Nat.card u := mod_cast Set.natCard_add_le _ ≤ (K ^ ((64*m^3+2)) * Nat.card A ^ (1 / 2) * (Nat.card H ^ (-1 / 2))) * (Nat.card H / Nat.card H') := by gcongr apply le_of_eq rw [eq_div_iff H'_pos.ne'] norm_cast _ < (K ^ ((64*m^3+2)) * Nat.card A ^ (1 / 2) * (Nat.card H ^ (-1 / 2))) * (Nat.card H / (Nat.card A / m)) := by gcongr _ = m * K ^ ((64*m^3+2)) * Nat.card A ^ (-1/2) * Nat.card H ^ (1/2) := by field_simp rpow_ring norm_num _ ≤ m * K ^ ((64*m^3+2)) * Nat.card A ^ (-1/2) * (K ^ (64*m^3) * Nat.card A) ^ (1/2) := by gcongr _ = m * K ^ (96*m^3+2) := by rpow_ring norm_num left; congr 1 ring
pfr/blueprint/src/chapter/torsion.tex:875
pfr/PFR/TorsionEndgame.lean:163
PFR
torsion_PFR_conjecture_aux
\begin{lemma}\label{pfr_aux_torsion}\lean{torsion_PFR_conjecture_aux}\leanok Suppose that $G$ is a finite abelian group of torsion $m$. If $A \subset G$ is non-empty and $|A+A| \leq K|A|$, then $A$ can be covered by at most $K^{(64m^3+2)/2}|A|^{1/2}/|H|^{1/2}$ translates of a subgroup $H$ of $G$ with \begin{equation} \label{ah2} |H|/|A| \in [K^{-64m^3}, K^{64m^3}]. \end{equation} \end{lemma} \begin{proof}\uses{main-entropy, unif-exist, uniform-entropy-II, jensen-bound,ruz-dist-def,ruzsa-diff,bound-conc,ruz-cov} Repeat the proof of \Cref{pfr_aux}, but with \Cref{main-entropy} in place of \Cref{entropy-pfr}. \end{proof}
lemma torsion_PFR_conjecture_aux {G : Type*} [AddCommGroup G] [Fintype G] {m:ℕ} (hm: m ≥ 2) (htorsion: ∀ x:G, m • x = 0) {A : Set G} [Finite A] {K : ℝ} (h₀A : A.Nonempty) (hA : Nat.card (A + A) ≤ K * Nat.card A) : ∃ (H : AddSubgroup G) (c : Set G), Nat.card c ≤ K ^ (64 * m^3 + 2) * Nat.card A ^ (1/2) * Nat.card H ^ (-1/2 : ℝ ) ∧ Nat.card H ≤ K ^ (64 * m^3) * Nat.card A ∧ Nat.card A ≤ K ^ (64 * m^3) * Nat.card H ∧ A ⊆ c + H := sorry
pfr/blueprint/src/chapter/torsion.tex:863
pfr/PFR/TorsionEndgame.lean:96
PFR
torsion_dist_shrinking
\begin{lemma}\label{torsion-dist-shrinking}\lean{torsion_dist_shrinking}\leanok If $G$ is a torsion-free group and $X,Y$ are $G$-valued random variables and $\phi:G\to \mathbb{F}_2^d$ is a homomorphism then \[\mathbb{H}(\phi(X))\leq 10d[X;Y].\] \end{lemma} \begin{proof} \uses{fibring-ineq, torsion-free-doubling, dist-zero}\leanok By \Cref{fibring-ineq} and \Cref{torsion-free-doubling} we have \[d[\phi(X);\phi(2Y)]\leq d[X;2Y]\leq 5d[X;Y]\] and $\phi(2Y)=2\phi(Y)\equiv 0$ so the left-hand side is equal to $d[\phi(X);0]=\mathbb{H}(\phi(X))/2$ (using \Cref{dist-zero}). \end{proof}
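Written as a single chain, matching the calc block in the Lean proof: since $\phi(2Y)=2\phi(Y)=0$ we have $d[\phi(X);\phi(2Y)]=d[\phi(X);0]=\mathbb{H}(\phi(X))/2$, and therefore \[ \mathbb{H}(\phi(X)) = 2\,d[\phi(X);\phi(2Y)] \leq 2\,d[X;2Y] \leq 2\cdot 5\,d[X;Y] = 10\,d[X;Y]. \]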
lemma torsion_dist_shrinking {H : Type*} [FiniteRange X] [FiniteRange Y] (hX : Measurable X) (hY : Measurable Y) [AddCommGroup H] [Module (ZMod 2) H] [MeasurableSpace H] [MeasurableSingletonClass H] [Countable H] (hG : AddMonoid.IsTorsionFree G) (φ : G →+ H) : H[φ ∘ X ; μ] ≤ 10 * d[X ; μ # Y ; μ'] := calc H[φ ∘ X ; μ] = 2 * d[φ ∘ X ; μ # φ ∘ (Y + Y) ; μ'] := by rw [map_comp_add, ZModModule.add_self, Pi.zero_def, rdist_zero_eq_half_ent, mul_div_cancel₀] exact two_ne_zero _ ≤ 2 * d[X ; μ # Y + Y ; μ'] := by gcongr; exact rdist_of_hom_le φ hX (hY.add hY) _ ≤ 2 * (5 * d[X ; μ # Y ; μ']) := by gcongr; exact torsion_free_doubling X Y μ μ' hX hY hG _ = 10 * d[X ; μ # Y ; μ'] := by ring end Torsion
pfr/blueprint/src/chapter/weak_pfr.tex:41
pfr/PFR/WeakPFR.lean:212
PFR
torsion_free_doubling
\begin{lemma}\label{torsion-free-doubling}\lean{torsion_free_doubling}\leanok If $G$ is torsion-free and $X,Y$ are $G$-valued random variables then $d[X;2Y]\leq 5d[X;Y]$. \end{lemma} \begin{proof} \uses{alt-submodularity,ruzsa-triangle,ruzsa-diff}\leanok Let $Y_1,Y_2$ be independent copies of $Y$ (also independent of $X$). Since $G$ is torsion-free we know $X,Y_1-Y_2,X-2Y_1$ uniquely determine $X,Y_1,Y_2$ and so \[\mathbb{H}(X,Y_1,Y_2,X-2Y_1)=\mathbb{H}(X,Y_1,Y_2)=\mathbb{H}(X)+2\mathbb{H}(Y).\] Similarly \[\mathbb{H}(X,X-2Y_1)=\mathbb{H}(X)+\mathbb{H}(2Y_1)=\mathbb{H}(X)+\mathbb{H}(Y).\] Furthermore \[\mathbb{H}(Y_1-Y_2,X-2Y_1)=\mathbb{H}(Y_1-Y_2,X-Y_1-Y_2)\leq \mathbb{H}(Y_1-Y_2)+\mathbb{H}(X-Y_1-Y_2).\] By submodularity (\Cref{alt-submodularity}) \[\mathbb{H}(X,Y_1,Y_2,X-2Y_1)+\mathbb{H}(X-2Y_1)\leq \mathbb{H}(X,X-2Y_1)+\mathbb{H}(Y_1-Y_2,X-2Y_1).\] Combining these inequalities \[\mathbb{H}(X-2Y_1)\leq \mathbb{H}(Y_1-Y_2)+\mathbb{H}(X-Y_1-Y_2)-\mathbb{H}(Y).\] Similarly we have \[\mathbb{H}(Y_1,Y_2,X-Y_1-Y_2)=\mathbb{H}(X)+2\mathbb{H}(Y),\] \[\mathbb{H}(Y_1,X-Y_1-Y_2)=\mathbb{H}(Y)+\mathbb{H}(X-Y_2),\] and \[\mathbb{H}(Y_2,X-Y_1-Y_2)=\mathbb{H}(Y)+\mathbb{H}(X-Y_1)\] and by submodularity (\Cref{alt-submodularity}) again \[\mathbb{H}(Y_1,Y_2,X-Y_1-Y_2)+ \mathbb{H}(X-Y_1-Y_2)\leq \mathbb{H}(Y_1,X-Y_1-Y_2)+\mathbb{H}(Y_2,X-Y_1-Y_2).\] Combining these inequalities (and recalling the definition of Ruzsa distance) gives \[\mathbb{H}(X-Y_1-Y_2)\leq \mathbb{H}(X-Y_1)+\mathbb{H}(X-Y_2)-\mathbb{H}(X)=2d[X;Y]+\mathbb{H}(Y).\] It follows that \[\mathbb{H}(X-2Y_1)\leq \mathbb{H}(Y_1-Y_2)+2d[X;Y]\] and so (using $\mathbb{H}(2Y)=\mathbb{H}(Y)$) \begin{align*} d[X;2Y] &=\mathbb{H}(X-2Y_1)-\mathbb{H}(X)/2-\mathbb{H}(2Y)/2\\ &\leq \mathbb{H}(Y_1-Y_2)+2d[X;Y]-\mathbb{H}(X)/2-\mathbb{H}(Y)/2\\ &= d[Y_1;Y_2]+\frac{\mathbb{H}(Y)-\mathbb{H}(X)}{2}+2d[X;Y]. \end{align*} Finally note that by the triangle inequality (\Cref{ruzsa-triangle}) we have \[d[Y_1;Y_2]\leq d[Y_1;X]+d[X;Y_2]=2d[X;Y].\] The result follows from $(\mathbb{H}(Y)-\mathbb{H}(X))/2\leq d[X;Y]$ (\Cref{ruzsa-diff}). \end{proof}
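The final tally is worth recording: combining the last three displays, \[ d[X;2Y] \leq d[Y_1;Y_2] + \frac{\mathbb{H}(Y)-\mathbb{H}(X)}{2} + 2d[X;Y] \leq 2d[X;Y] + d[X;Y] + 2d[X;Y] = 5d[X;Y], \] using $d[Y_1;Y_2]\leq 2d[X;Y]$ from the triangle inequality and $(\mathbb{H}(Y)-\mathbb{H}(X))/2\leq d[X;Y]$ from \Cref{ruzsa-diff}.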
/-- If `G` is torsion-free and `X, Y` are `G`-valued random variables then `d[X ; 2Y] ≤ 5d[X ; Y]`. -/ lemma torsion_free_doubling [FiniteRange X] [FiniteRange Y] (hX : Measurable X) (hY : Measurable Y) (hG : AddMonoid.IsTorsionFree G) : d[X ; μ # (Y + Y) ; μ'] ≤ 5 * d[X ; μ # Y ; μ'] := by obtain ⟨A, mA, μA, X', Y'₁, Y'₂, hμA, h_indep, hX'_meas, hY'₁_meas, hY'₂_meas, hX'_ident, hY'₁_ident, hY'₂_ident, _, _, _⟩ := independent_copies3_nondep_finiteRange hX hY hY μ μ' μ' have h_meas (i : Fin 3) : Measurable (![X', Y'₁, Y'₂] i) := by fin_cases i <;> assumption have : NoZeroSMulDivisors ℕ G := hG.noZeroSMulDivisors_nat have : H[⟨X', ⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩⟩ ; μA] = H[X ; μ] + 2 * H[Y ; μ'] := calc H[⟨X', ⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩⟩ ; μA] = H[⟨X', ⟨Y'₁, Y'₂⟩⟩ ; μA] := by let f : G × G × G → G × G × G := fun ⟨x, y₁, y₂⟩ ↦ (x, y₁ - y₂, x - 2 • y₁) show H[f ∘ ⟨X', ⟨Y'₁, Y'₂⟩⟩ ; μA] = _ refine entropy_comp_of_injective μA ?_ f ?_ · exact Measurable.prod hX'_meas <| Measurable.prod hY'₁_meas hY'₂_meas · exact fun ⟨_, _, _⟩ _ h ↦ by simp [f] at h; obtain ⟨_, _, _⟩ := h; simp_all [smul_right_inj] _ = H[X ; μ] + 2 * H[Y ; μ'] := by have : IndepFun X' (prod Y'₁ Y'₂) μA := Indep.symm <| h_indep.indepFun_prodMk h_meas 1 2 0 (by decide) (by decide) rw [this.entropy_pair_eq_add hX'_meas (by exact Measurable.prod hY'₁_meas hY'₂_meas), IndepFun.entropy_pair_eq_add hY'₁_meas hY'₂_meas (h_indep.indepFun (show 1 ≠ 2 by decide)), hX'_ident.entropy_eq, hY'₁_ident.entropy_eq, hY'₂_ident.entropy_eq, two_mul] have : H[⟨X', X' - 2 • Y'₁⟩ ; μA] = H[X ; μ] + H[Y ; μ'] := calc H[⟨X', X' - 2 • Y'₁⟩ ; μA] = H[⟨X', Y'₁⟩ ; μA] := by let f : G × G → G × G := fun ⟨x, y₁⟩ ↦ (x, x - 2 • y₁) show H[f ∘ ⟨X', Y'₁⟩ ; μA] = _ apply entropy_comp_of_injective μA (by exact Measurable.prod hX'_meas hY'₁_meas) f exact fun ⟨_, _⟩ _ h ↦ by simp [f] at h; obtain ⟨_, _⟩ := h; simp_all [smul_right_inj] _ = H[X ; μ] + H[Y ; μ'] := by rw [IndepFun.entropy_pair_eq_add hX'_meas hY'₁_meas (h_indep.indepFun (show 0 ≠ 1 by decide)), hX'_ident.entropy_eq, hY'₁_ident.entropy_eq] let f : G × G → G × G := fun ⟨x, y⟩ ↦ (x, y - x) have hf : f.Injective := fun ⟨_, _⟩ _ h ↦ by simp [f] at h; obtain ⟨_, _⟩ := h; simp_all have : H[⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩ ; μA] ≤ H[Y'₁ - Y'₂ ; μA] + H[X' - Y'₁ - Y'₂ ; μA] := calc H[⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩ ; μA] = H[f ∘ ⟨Y'₁ - Y'₂, X' - Y'₁ - Y'₂⟩ ; μA] := by show _ = H[⟨Y'₁ - Y'₂, X' - Y'₁ - Y'₂ - (Y'₁ - Y'₂)⟩ ; μA] rw [sub_sub_sub_cancel_right, ← sub_add_eq_sub_sub, two_nsmul] _ = H[⟨Y'₁ - Y'₂, X' - Y'₁ - Y'₂⟩ ; μA] := by refine entropy_comp_of_injective μA (Measurable.prod ?_ ?_) f hf · exact Measurable.sub hY'₁_meas hY'₂_meas · exact Measurable.sub (Measurable.sub hX'_meas hY'₁_meas) hY'₂_meas _ ≤ H[Y'₁ - Y'₂ ; μA] + H[X' - Y'₁ - Y'₂ ; μA] := entropy_pair_le_add (hY'₁_meas.sub hY'₂_meas) (hX'_meas.sub hY'₁_meas |>.sub hY'₂_meas) μA have : H[⟨X', ⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩⟩ ; μA] + H[X' - 2 • Y'₁ ; μA] ≤ H[⟨X', X' - 2 • Y'₁⟩ ; μA] + H[⟨Y'₁ - Y'₂, X' - 2 • Y'₁⟩ ; μA] := by have : FiniteRange (Y'₁ - Y'₂) := FiniteRange.sub Y'₁ Y'₂ have : FiniteRange (2 • Y'₁) := by show FiniteRange ((fun x ↦ 2 • x) ∘ Y'₁); infer_instance apply entropy_triple_add_entropy_le μA hX'_meas (Measurable.sub hY'₁_meas hY'₂_meas) exact Measurable.sub hX'_meas <| Measurable.const_smul hY'₁_meas 2 have : H[⟨Y'₁, ⟨Y'₂, X' - Y'₁ - Y'₂⟩⟩ ; μA] = H[X ; μ] + 2 * H[Y ; μ'] := calc H[⟨Y'₁, ⟨Y'₂, X' - Y'₁ - Y'₂⟩⟩ ; μA] = H[⟨Y'₁, ⟨Y'₂, X'⟩⟩ ; μA] := by let f : G × G × G → G × G × G := fun ⟨y₁, y₂, x⟩ ↦ (y₁, y₂, x - y₁ - y₂) show H[f ∘ ⟨Y'₁, ⟨Y'₂, X'⟩⟩ ; μA] = 
H[⟨Y'₁, ⟨Y'₂, X'⟩⟩ ; μA] refine entropy_comp_of_injective μA ?_ f ?_ · exact Measurable.prod hY'₁_meas <| Measurable.prod hY'₂_meas hX'_meas · exact fun ⟨_, _, _⟩ _ h ↦ by simp [f] at h; obtain ⟨_, _, _⟩ := h; simp_all _ = H[X ; μ] + 2 * H[Y ; μ'] := by have : IndepFun Y'₁ (prod Y'₂ X') μA := Indep.symm <| h_indep.indepFun_prodMk h_meas 2 0 1 (by decide) (by decide) rw [this.entropy_pair_eq_add hY'₁_meas (by exact Measurable.prod hY'₂_meas hX'_meas), IndepFun.entropy_pair_eq_add hY'₂_meas hX'_meas (h_indep.indepFun (show 2 ≠ 0 by decide)), hX'_ident.entropy_eq, hY'₁_ident.entropy_eq, hY'₂_ident.entropy_eq] group have : H[⟨Y'₁, X' - Y'₁ - Y'₂⟩ ; μA] = H[Y ; μ'] + H[X' - Y'₂ ; μA] := calc H[⟨Y'₁, X' - Y'₁ - Y'₂⟩ ; μA] = H[f ∘ ⟨Y'₁, X' - Y'₂⟩ ; μA] := by rw [sub_right_comm] ; rfl _ = H[⟨Y'₁, X' - Y'₂⟩ ; μA] := entropy_comp_of_injective μA (by exact Measurable.prod hY'₁_meas <| Measurable.sub hX'_meas hY'₂_meas) f hf _ = H[Y ; μ'] + H[X' - Y'₂ ; μA] := by have : FiniteRange (X' - Y'₂) := FiniteRange.sub X' Y'₂ convert IndepFun.entropy_pair_eq_add hY'₁_meas (hX'_meas.sub hY'₂_meas) <| h_indep.indepFun_sub_right h_meas 1 0 2 (by decide) (by decide) exact hY'₁_ident.entropy_eq.symm have : H[⟨Y'₂, X' - Y'₁ - Y'₂⟩ ; μA] = H[Y ; μ'] + H[X' - Y'₁ ; μA] := calc H[⟨Y'₂, X' - Y'₁ - Y'₂⟩ ; μA] = H[f ∘ ⟨Y'₂, X' - Y'₁⟩ ; μA] := rfl _ = H[⟨Y'₂, X' - Y'₁⟩ ; μA] := entropy_comp_of_injective μA (by exact Measurable.prod hY'₂_meas <| Measurable.sub hX'_meas hY'₁_meas) f hf _ = H[Y ; μ'] + H[X' - Y'₁ ; μA] := by have : FiniteRange (X' - Y'₁) := FiniteRange.sub X' Y'₁ convert IndepFun.entropy_pair_eq_add hY'₂_meas (hX'_meas.sub hY'₁_meas) <| h_indep.indepFun_sub_right h_meas 2 0 1 (by decide) (by decide) exact hY'₂_ident.entropy_eq.symm have : H[⟨Y'₁, ⟨Y'₂, X' - Y'₁ - Y'₂⟩⟩ ; μA] + H[X' - Y'₁ - Y'₂ ; μA] ≤ H[⟨Y'₁, X' - Y'₁ - Y'₂⟩ ; μA] + H[⟨Y'₂, X' - Y'₁ - Y'₂⟩ ; μA] := by apply entropy_triple_add_entropy_le μA hY'₁_meas hY'₂_meas exact Measurable.sub (Measurable.sub hX'_meas hY'₁_meas) hY'₂_meas have : H[X' - Y'₁ - Y'₂ ; μA] ≤ 2 * d[X ; μ # Y ; μ'] + H[Y ; μ'] := calc H[X' - Y'₁ - Y'₂ ; μA] ≤ H[X' - Y'₁ ; μA] + H[X' - Y'₂ ; μA] - H[X ; μ] := by linarith _ = 2 * d[X ; μ # Y ; μ'] + H[Y ; μ'] := by nth_rw 1 [two_mul, ← hX'_ident.rdist_eq hY'₁_ident, ← hX'_ident.rdist_eq hY'₂_ident] have h1 : d[X' ; μA # Y'₁ ; μA] = H[X' - Y'₁ ; μA] - H[X' ; μA] / 2 - H[Y'₁ ; μA] / 2 := (h_indep.indepFun (show 0 ≠ 1 by decide)).rdist_eq hX'_meas hY'₁_meas have h2 : d[X' ; μA # Y'₂ ; μA] = H[X' - Y'₂ ; μA] - H[X' ; μA] / 2 - H[Y'₂ ; μA] / 2 := (h_indep.indepFun (show 0 ≠ 2 by decide)).rdist_eq hX'_meas hY'₂_meas rw [h1, h2, hY'₁_ident.entropy_eq, hY'₂_ident.entropy_eq, hX'_ident.entropy_eq] group have : d[X ; μ # 2 • Y ; μ'] ≤ d[Y'₁ ; μA # Y'₂ ; μA] + (H[Y ; μ'] - H[X ; μ]) / 2 + 2 * d[X ; μ # Y ; μ'] := calc d[X ; μ # 2 • Y ; μ'] = H[X' - 2 • Y'₁ ; μA] - H[X ; μ] / 2 - H[2 • Y ; μ'] / 2 := by have h2Y_ident : IdentDistrib (2 • Y'₁) (2 • Y) (μ := μA) (ν := μ') := by convert hY'₁_ident.comp <| .of_discrete (f := fun g ↦ 2 • g) have h2Y_indep : IndepFun X' (2 • Y'₁) (μ := μA) := by convert (h_indep.indepFun (show 0 ≠ 1 by decide)).comp measurable_id (measurable_const_smul 2) rw [← hX'_ident.rdist_eq h2Y_ident, h2Y_indep.rdist_eq hX'_meas <| Measurable.const_smul hY'₁_meas 2, hX'_ident.entropy_eq, h2Y_ident.entropy_eq] _ ≤ H[Y'₁ - Y'₂ ; μA] + 2 * d[X ; μ # Y ; μ'] - H[X ; μ] / 2 - H[2 • Y ; μ'] / 2 := by linarith _ = d[Y'₁ ; μA # Y'₂ ; μA] + (H[Y ; μ'] - H[X ; μ]) / 2 + 2 * d[X ; μ # Y ; μ'] := by have H2Y : H[2 • Y ; μ'] = H[Y ; μ'] 
:= by let f (g : G) := 2 • g exact entropy_comp_of_injective μ' hY f (fun _ _ ↦ by simp [f, smul_right_inj]) have : d[Y'₁ ; μA # Y'₂ ; μA] = H[Y'₁ - Y'₂ ; μA] - H[Y'₁ ; μA] / 2 - H[Y'₂ ; μA] / 2 := (h_indep.indepFun (show 1 ≠ 2 by decide)).rdist_eq hY'₁_meas hY'₂_meas rw [this, hY'₁_ident.entropy_eq, hY'₂_ident.entropy_eq, H2Y] group have : d[Y'₁ ; μA # Y'₂ ; μA] ≤ 2 * d[X ; μ # Y ; μ'] := by rw [two_mul] convert rdist_triangle hY'₁_meas hX'_meas hY'₂_meas (μ := μA) (μ' := μA) (μ'' := μA) · exact rdist_symm.trans (hY'₁_ident.rdist_eq hX'_ident).symm · exact (hX'_ident.rdist_eq hY'₂_ident).symm rw [← two_nsmul] linarith [abs_le.mp <| diff_ent_le_rdist hX hY (μ := μ) (μ' := μ')] /-- If `G` is a torsion-free group and `X, Y` are `G`-valued random variables and `φ : G → 𝔽₂^d` is a homomorphism then `H[φ ∘ X ; μ] ≤ 10 * d[X ; μ # Y ; μ']`. -/
pfr/blueprint/src/chapter/weak_pfr.tex:3
pfr/PFR/WeakPFR.lean:88
PFR
weak_PFR_asymm
\begin{theorem}\label{weak-pfr-asymm}\lean{weak_PFR_asymm}\leanok If $A,B\subseteq \mathbb{Z}^d$ are finite non-empty sets then there exist non-empty $A'\subseteq A$ and $B'\subseteq B$ such that \[\log\frac{\lvert A\rvert\lvert B\rvert}{\lvert A'\rvert\lvert B'\rvert}\leq 34d[U_A;U_B]\] and such that $\max(\dim A',\dim B')\leq \frac{40}{\log 2} d[U_A;U_B]$. \end{theorem} \begin{proof} \uses{torsion-dist-shrinking, pfr-projection, single-fibres, dimension-def}\leanok Without loss of generality we can assume that $A$ and $B$ are not both inside (possibly distinct) cosets of the same proper subgroup of $\mathbb{Z}^d$, or we just replace $\mathbb{Z}^d$ with that subgroup. We prove the result by induction on $\lvert A\rvert+\lvert B\rvert$. Let $\phi:\mathbb{Z}^d\to \mathbb{F}_2^d$ be the natural mod-2 homomorphism. By \Cref{torsion-dist-shrinking} \[\max(\mathbb{H}(\phi(U_A)),\mathbb{H}(\phi(U_B)))\leq 10d[U_A;U_B].\] We now apply \Cref{pfr-projection}, obtaining some subgroup $H\leq \mathbb{F}_2^d$ such that \[\log \lvert H\rvert \leq 40d[U_A;U_B]\] and \[\mathbb{H}(\tilde{\phi}(U_A))+\mathbb{H}(\tilde{\phi}(U_B))\leq 34 d[\tilde{\phi}(U_A);\tilde{\phi}(U_B)]\] where $\tilde{\phi}:\mathbb{Z}^d\to \mathbb{F}_2^d/H$ is $\phi$ composed with the projection onto $\mathbb{F}_2^d/H$. By \Cref{single-fibres} there exist $x,y\in \mathbb{F}_2^d/H$ such that, with $A_x=A\cap \tilde{\phi}^{-1}(x)$ and similarly for $B_y$, \[\log \frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert}\leq 34(d[U_A;U_B]-d[U_{A_x};U_{B_y}]).\] Suppose first that $\lvert A_x\rvert+\lvert B_y\rvert=\lvert A\rvert+\lvert B\rvert$. This means that $\tilde{\phi}(A)=\{x\}$ and $\tilde{\phi}(B)=\{y\}$, and hence both $A$ and $B$ are in cosets of $\ker \tilde{\phi}$. Since by assumption $A,B$ are not in cosets of a proper subgroup of $\mathbb{Z}^d$ this means $\ker\tilde{\phi}=\mathbb{Z}^d$, and so (examining the definition of $\tilde{\phi}$) we must have $H=\mathbb{F}_2^d$. Then our bound on $\log\lvert H\rvert$ forces $d\leq \frac{40}{\log 2}d[U_A;U_B]$ and we are done with $A'=A$ and $B'=B$. Otherwise, \[\lvert A_x\rvert+\lvert B_y\rvert <\lvert A\rvert+\lvert B\rvert.\] By induction we can find some $A'\subseteq A_x$ and $B'\subseteq B_y$ such that $\dim A',\dim B'\leq \frac{40}{\log 2} d[U_{A_x};U_{B_y}]\leq \frac{40}{\log 2}d[U_A;U_B]$ and \[\log \frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A'\rvert\lvert B'\rvert}\leq 34d[U_{A_x};U_{B_y}].\] Adding these inequalities implies \[\log\frac{\lvert A\rvert\lvert B\rvert}{\lvert A'\rvert\lvert B'\rvert}\leq 34d[U_A;U_B]\] as required. \end{proof}
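The concluding step is simply the telescoping \[ \log\frac{\lvert A\rvert\lvert B\rvert}{\lvert A'\rvert\lvert B'\rvert} = \log\frac{\lvert A\rvert\lvert B\rvert}{\lvert A_x\rvert\lvert B_y\rvert} + \log\frac{\lvert A_x\rvert\lvert B_y\rvert}{\lvert A'\rvert\lvert B'\rvert} \leq 34\bigl(d[U_A;U_B]-d[U_{A_x};U_{B_y}]\bigr) + 34\,d[U_{A_x};U_{B_y}] = 34\,d[U_A;U_B], \] while the dimension bound is inherited from the induction hypothesis via $d[U_{A_x};U_{B_y}]\leq d[U_A;U_B]$.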
lemma weak_PFR_asymm (A B : Set G) [Finite A] [Finite B] (hA : A.Nonempty) (hB : B.Nonempty) : WeakPFRAsymmConclusion A B := by let P : ℕ → Prop := fun M ↦ (∀ (G : Type _) (hG_comm : AddCommGroup G) (_hG_free : Module.Free ℤ G) (_hG_fin : Module.Finite ℤ G) (_hG_count : Countable G) (hG_mes : MeasurableSpace G) (_hG_sing : MeasurableSingletonClass G) (A B : Set G) (_hA_fin : Finite A) (_hB_fin : Finite B) (_hA_non : A.Nonempty) (_hB_non : B.Nonempty) (_hM : Nat.card A + Nat.card B ≤ M), WeakPFRAsymmConclusion A B) suffices ∀ M, (∀ M', M' < M → P M') → P M by set M := Nat.card A + Nat.card B have hM : Nat.card A + Nat.card B ≤ M := Nat.le_refl _ convert (Nat.strong_induction_on (p := P) M this) G ‹_› ‹_› ‹_› ‹_› _ ‹_› A B ‹_› ‹_› ‹_› ‹_› hM intro M h_induct -- wlog we can assume A, B are not in cosets of a smaller subgroup suffices ∀ (G : Type _) (hG_comm : AddCommGroup G) (_hG_free : Module.Free ℤ G) (_hG_fin : Module.Finite ℤ G) (_hG_count : Countable G) (hG_mes : MeasurableSpace G) (_hG_sing : MeasurableSingletonClass G) (A B : Set G) (_hA_fin : Finite A) (_hB_fin : Finite B) (_hA_non : A.Nonempty) (_hB_non : B.Nonempty) (_hM : Nat.card A + Nat.card B ≤ M) (_hnot : NotInCoset A B), WeakPFRAsymmConclusion A B by intro G hG_comm hG_free hG_fin hG_count hG_mes hG_sing A B hA_fin hB_fin hA_non hB_non hM obtain ⟨G', A', B', hAA', hBB', hnot'⟩ := wlog_notInCoset hA_non hB_non have hG'_fin : Module.Finite ℤ G' := Module.Finite.iff_fg (N := AddSubgroup.toIntSubmodule G').2 (IsNoetherian.noetherian _) have hG'_free : Module.Free ℤ G' := by rcases Submodule.nonempty_basis_of_pid (Module.Free.chooseBasis ℤ G) (AddSubgroup.toIntSubmodule G') with ⟨n, ⟨b⟩⟩ exact Module.Free.of_basis b have hAA'_card : Nat.card A = Nat.card A' := (Nat.card_image_of_injective Subtype.val_injective _) ▸ hAA'.card_congr have hBB'_card : Nat.card B = Nat.card B' := (Nat.card_image_of_injective Subtype.val_injective _) ▸ hBB'.card_congr have hA_non' : Nonempty A := Set.nonempty_coe_sort.mpr hA_non have hB_non' : Nonempty B := Set.nonempty_coe_sort.mpr hB_non rw [hAA'_card, hBB'_card] at hM have hA'_nonfin : A'.Nonempty ∧ Finite A' := by simpa [-Subtype.exists, hAA'_card, Nat.card_pos_iff] using Nat.card_pos (α := A) have hB'_nonfin : B'.Nonempty ∧ Finite B' := by simpa [-Subtype.exists, hBB'_card, Nat.card_pos_iff] using Nat.card_pos (α := B) obtain ⟨hA'_non, hA'_fin⟩ := hA'_nonfin obtain ⟨hB'_non, hB'_fin⟩ := hB'_nonfin replace this := this G' _ hG'_free hG'_fin (by infer_instance) (by infer_instance) (by infer_instance) A' B' hA'_fin hB'_fin hA'_non hB'_non hM hnot' exact conclusion_transfers G' A' B' hAA' hBB' hA'_non hB'_non this intro G hG_comm hG_free hG_fin hG_count hG_mes hG_sing A B hA_fin hB_fin hA_non hB_non hM hnot rcases weak_PFR_asymm_prelim A B hA_non hB_non with ⟨N, x, y, Ax, By, hAx_non, hBy_non, hAx_fin, hBy_fin, hAx, hBy, hdim, hcard⟩ have hAxA : Ax ⊆ A := by rw [hAx]; simp have hByB : By ⊆ B := by rw [hBy]; simp have hA_pos : (0 : ℝ) < Nat.card A := Nat.cast_pos.mpr (@Nat.card_pos _ hA_non.to_subtype _) have hB_pos : (0 : ℝ) < Nat.card B := Nat.cast_pos.mpr (@Nat.card_pos _ hB_non.to_subtype _) rcases lt_or_ge (Nat.card Ax + Nat.card By) (Nat.card A + Nat.card B) with h | h · replace h := h_induct (Nat.card Ax + Nat.card By) (h.trans_le hM) G hG_comm hG_free hG_fin hG_count hG_mes hG_sing Ax By (Set.finite_coe_iff.mpr hAx_fin) (Set.finite_coe_iff.mpr hBy_fin) hAx_non hBy_non (Eq.le rfl) rcases h with ⟨A', B', hA', hB', hA'_non, hB'_non, hcard_ineq, hdim_ineq⟩ use A', B' have hAx_fin' := 
Set.finite_coe_iff.mpr hAx_fin have hBy_fin' := Set.finite_coe_iff.mpr hBy_fin have hA'_fin' := Set.finite_coe_iff.mpr (Set.Finite.subset hAx_fin hA') have hB'_fin' := Set.finite_coe_iff.mpr (Set.Finite.subset hBy_fin hB') have hAx_non' := Set.nonempty_coe_sort.mpr hAx_non have hBy_non' := Set.nonempty_coe_sort.mpr hBy_non have hA'_non' := Set.nonempty_coe_sort.mpr hA'_non have hB'_non' := Set.nonempty_coe_sort.mpr hB'_non have hAx_pos : (0 : ℝ) < Nat.card Ax := Nat.cast_pos.mpr Nat.card_pos have hBy_pos : (0 : ℝ) < Nat.card By := Nat.cast_pos.mpr Nat.card_pos have hA'_pos : (0 : ℝ) < Nat.card A' := Nat.cast_pos.mpr Nat.card_pos have hB'_pos : (0 : ℝ) < Nat.card B' := Nat.cast_pos.mpr Nat.card_pos have hAxA_le : (Nat.card Ax : ℝ) ≤ (Nat.card A : ℝ) := Nat.cast_le.mpr (Nat.card_mono A.toFinite hAxA) have hByB_le : (Nat.card By : ℝ) ≤ (Nat.card B : ℝ) := Nat.cast_le.mpr (Nat.card_mono B.toFinite hByB) refine ⟨hA'.trans hAxA, hB'.trans hByB, hA'_non, hB'_non, ?_, ?_⟩ · rw [four_logs hA_pos hB_pos hA'_pos hB'_pos] rw [four_logs hAx_pos hBy_pos hA'_pos hB'_pos] at hcard_ineq linarith only [hcard, hcard_ineq] apply hdim_ineq.trans gcongr linarith only [Real.log_le_log hAx_pos hAxA_le, Real.log_le_log hBy_pos hByB_le, hcard] use A, B refine ⟨Eq.subset rfl, Eq.subset rfl, hA_non, hB_non, ?_, ?_⟩ · have := hA_non.to_subtype have := hB_non.to_subtype apply LE.le.trans _ <| mul_nonneg (by norm_num) <| setRuzsaDist_nonneg A B rw [div_self (by positivity)] simp have hAx_eq : Ax = A := by apply Set.Finite.eq_of_subset_of_card_le A.toFinite hAxA linarith only [h, Nat.card_mono B.toFinite hByB] have hBy_eq : By = B := by apply Set.Finite.eq_of_subset_of_card_le B.toFinite hByB linarith only [h, Nat.card_mono A.toFinite hAxA] have hN : N = ⊤ := by have : (A-A) ∪ (B-B) ⊆ N := by rw [← hAx_eq, ← hBy_eq, hAx, hBy] intro z hz simp only [mk'_apply, mem_union, mem_sub, mem_setOf_eq] at hz convert (QuotientAddGroup.eq_zero_iff z).mp ?_ · infer_instance rcases hz with ⟨a, ⟨-, ha⟩, a', ⟨-, ha'⟩, haa'⟩ | ⟨b, ⟨-, hb⟩, b', ⟨-,hb'⟩, hbb'⟩ · rw [← haa']; simp [ha, ha'] rw [← hbb']; simp [hb, hb'] rw [← AddSubgroup.closure_le, hnot] at this exact top_le_iff.mp this have : Nat.card (G ⧸ N) = 1 := by rw [Nat.card_eq_one_iff_unique] constructor · rw [hN] exact QuotientAddGroup.subsingleton_quotient_top infer_instance simp only [this, Nat.cast_one, log_one, zero_add] at hdim rw [← le_div_iff₀' (by positivity)] at hdim convert le_trans ?_ hdim using 1 · field_simp simp only [Nat.cast_max, max_le_iff, Nat.cast_le] exact ⟨AffineSpace.finrank_le_moduleFinrank, AffineSpace.finrank_le_moduleFinrank⟩ /-- If $A\subseteq \mathbb{Z}^d$ is a finite non-empty set with $d[U_A;U_A]\leq \log K$ then there exists a non-empty $A'\subseteq A$ such that $\lvert A'\rvert\geq K^{-17}\lvert A\rvert$ and $\dim A'\leq \frac{40}{\log 2} \log K$. -/
pfr/blueprint/src/chapter/weak_pfr.tex:170
pfr/PFR/WeakPFR.lean:917
PFR
weak_PFR_int
\begin{theorem}\label{weak-pfr-int}\lean{weak_PFR_int}\leanok Let $A\subseteq \mathbb{Z}^d$ be finite and non-empty with $\lvert A-A\rvert\leq K\lvert A\rvert$. Then there exists $A'\subseteq A$ such that $\lvert A'\rvert \geq K^{-17}\lvert A\rvert$ and $\dim A' \leq \frac{40}{\log 2}\log K$. \end{theorem} \begin{proof}\leanok \uses{weak-pfr-symm,dimension-def} As in the beginning of \Cref{pfr}, the doubling condition forces $d[U_A;U_A]\leq \log K$, and then we apply \Cref{weak-pfr-symm}. \end{proof}
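The reduction to \Cref{weak-pfr-symm} rests on the one-line estimate \[ d[U_A;U_A] \leq \log\lvert A-A\rvert - \log\lvert A\rvert \leq \log\bigl(K\lvert A\rvert\bigr) - \log\lvert A\rvert = \log K, \] which is how the Lean proof consumes the doubling hypothesis, by taking logarithms of the cardinality bound.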
theorem weak_PFR_int {G : Type*} [AddCommGroup G] [Module.Free ℤ G] [Module.Finite ℤ G] {A : Set G} [A_fin : Finite A] (hnA : A.Nonempty) {K : ℝ} (hA : Nat.card (A-A) ≤ K * Nat.card A) : ∃ A' : Set G, A' ⊆ A ∧ Nat.card A' ≥ K ^ (-17 : ℝ) * Nat.card A ∧ AffineSpace.finrank ℤ A' ≤ (40 / log 2) * log K := by have : Nonempty (A - A) := (hnA.sub hnA).coe_sort have : Finite (A - A) := Set.Finite.sub A_fin A_fin have hK : 0 < K := by have : 0 < K * Nat.card A := lt_of_lt_of_le (mod_cast Nat.card_pos) hA nlinarith have : Countable G := countable_of_finiteDimensional ℤ G let m : MeasurableSpace G := ⊤ apply weak_PFR hnA hK ((setRuzsaDist_le A A hnA hnA).trans _) suffices log (Nat.card (A-A)) ≤ log K + log (Nat.card A) by linarith rw [← log_mul (by positivity) _] · apply log_le_log _ hA norm_cast exact Nat.card_pos exact_mod_cast ne_of_gt (@Nat.card_pos _ hnA.to_subtype _)
pfr/blueprint/src/chapter/weak_pfr.tex:213
pfr/PFR/WeakPFR.lean:1088