Compare revisions
2 commits
87a8bbbf0f...95cf9001ad
| Author | SHA1 | Date |
|---|---|---|
| | 95cf9001ad | |
| | 0cb1203a30 | |
1 changed file with 102 additions and 3 deletions
@@ -123,6 +123,106 @@ with 10 packets.
\section{Next Fit Dual Bin Packing algorithm}
\subsection{Distribution of the number of items in a bin}

Let $ k \in \mathbb{N} $. Let $ (U_n)_{n \in \mathbb{N}} $ be a sequence of
independent random variables with uniform distribution on $ [0, 1] $, representing
the size of the $ n $-th item.

Let $ T_i $ denote the number of items in the $ i $-th bin. Since every bin is
filled from fresh, independent item sizes, each $ T_i $ has the same distribution
as $ T_1 $, so we study the first bin. We have that
\begin{equation}
T_1 = k \iff U_1 + U_2 + \ldots + U_{k-1} < 1 \text{ and } U_1 + U_2 + \ldots + U_{k} \geq 1
\end{equation}

In particular, since $ U_1 < 1 $ almost surely, every bin contains at least two
items. Let $ A_k = \{ U_1 + U_2 + \ldots + U_k < 1 \} $ for $ k \geq 1 $.
Hence, for $ k \geq 2 $,

\begin{align*}
P(T_1 = k)
& = P(A_{k-1} \cap A_k^c) \\
& = P(A_{k-1}) - P(A_k) \qquad \text{(as $ A_k \subset A_{k-1} $)}
\end{align*}

We will show that $ P(A_k) = \frac{1}{k!} $ for all $ k \geq 1 $. To do so, we
prove by induction the following proposition \eqref{eq:induction}, $ \forall k \geq 1 $:

\begin{equation}
\label{eq:induction}
\tag{$ \mathcal{H}_k $}
P(U_1 + U_2 + \ldots + U_{k} < a) = \frac{a^k}{k!} \qquad \forall a \in [0, 1].
\end{equation}

Let us denote $ S_k = U_1 + U_2 + \ldots + U_{k} \qquad \forall k \geq 1 $.

\paragraph{Base cases} $ k = 1 $: $ P(U_1 < a) = a = \frac{a^1}{1!} $, which proves $ (\mathcal{H}_1) $.

$ k = 2 $:
\[
P(U_1 + U_2 < a) = \iint_{\mathcal{D}} f_{U_1, U_2}(x, y) \, dx \, dy
\]
where $ \mathcal{D} = \{ (x, y) \in [0, 1]^2 \mid x + y < a \} $.

$ U_1 $ and $ U_2 $ are independent, so
\begin{align*}
f_{U_1, U_2}(x, y) & = f_{U_1}(x) \cdot f_{U_2}(y) \\
& = \begin{cases}
    1 & \text{if } x \in [0, 1] \text{ and } y \in [0, 1] \\
    0 & \text{otherwise}
\end{cases}
\end{align*}

Hence,

\begin{align*}
P(U_1 + U_2 < a)
& = \iint_{\mathcal{D}} dx \, dy \\
& = \int_{0}^{a} \int_{0}^{a - x} dy \, dx \\
& = \int_{0}^{a} (a - x) \, dx \\
& = \left[ ax - \frac{x^2}{2} \right]_{0}^{a} \\
& = a^2 - \frac{a^2}{2} = \frac{a^2}{2!}
\end{align*}

which proves $ (\mathcal{H}_2) $.

\paragraph{Induction step} For a fixed $ k > 2 $, we assume that
$ (\mathcal{H}_{k-1}) $ holds and prove $ (\mathcal{H}_{k}) $. Since $ S_k = S_{k-1} + U_k $,

\[
P(S_{k-1} + U_{k} < a)
= \iint_{\mathcal{D}} f_{S_{k-1}, U_{k}}(x, y) \, dx \, dy
\]
where $ \mathcal{D} = \{ (x, y) \in [0, 1]^2 \mid x + y < a \} $.
As $ S_{k-1} $ and $ U_{k} $ are independent,
\[
P(S_{k-1} + U_{k} < a)
= \iint_{\mathcal{D}} f_{S_{k-1}}(x) \cdot f_{U_{k}}(y) \, dx \, dy
\]

$ (\mathcal{H}_{k-1}) $ gives us that $ \forall x \in [0, 1] $,
$ F_{S_{k-1}}(x) = P(S_{k-1} < x) = \frac{x^{k-1}}{(k-1)!} $.

By differentiating, we get that $ \forall x \in [0, 1] $,
\[
f_{S_{k-1}}(x) = F'_{S_{k-1}}(x) = \frac{x^{k-2}}{(k-2)!}
\]

Furthermore, $ U_{k} $ is uniformly distributed on $ [0, 1] $, so
$ f_{U_{k}}(y) = 1 $ for $ y \in [0, 1] $.

Hence,
\begin{align*}
P(S_{k-1} + U_{k} < a)
& = \int_{0}^{a} \int_{0}^{a - x} \frac{x^{k-2}}{(k-2)!} \, dy \, dx \\
& = \int_{0}^{a} \frac{x^{k-2}}{(k-2)!} (a - x) \, dx \\
& = \frac{1}{(k-2)!} \left[ \frac{a \, x^{k-1}}{k-1} - \frac{x^{k}}{k} \right]_{0}^{a} \\
& = \frac{a^{k}}{(k-1)!} - \frac{a^{k}}{k \, (k-2)!} \\
& = \frac{a^{k}}{k!}
\end{align*}

which proves $ (\mathcal{H}_{k}) $ and completes the induction.
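
Taking $ a = 1 $ in \eqref{eq:induction} gives $ P(A_k) = P(S_k < 1) = \frac{1}{k!} $,
and combining this with the decomposition of $ P(T_1 = k) $ above yields, for all $ k \geq 2 $,
\begin{equation*}
P(T_1 = k) = \frac{1}{(k-1)!} - \frac{1}{k!} = \frac{k-1}{k!}.
\end{equation*}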
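
As an informal sanity check (not part of the derivation above), this distribution
can be compared against a short Monte Carlo simulation of the first bin. The
following is a minimal Python sketch, assuming the NFDBP filling rule described
earlier (a bin is closed once its content reaches 1, keeping the overflowing item):

\begin{verbatim}
import random
from math import factorial

def first_bin_count():
    # Fill one bin: add items of size Uniform(0, 1) until the total
    # reaches 1; the overflowing item stays in the bin (NFDBP rule).
    total, count = 0.0, 0
    while total < 1.0:
        total += random.random()
        count += 1
    return count

N = 100_000
counts = [first_bin_count() for _ in range(N)]
for k in range(2, 7):
    empirical = counts.count(k) / N
    theory = (k - 1) / factorial(k)
    print(f"k={k}: empirical {empirical:.4f} vs (k-1)/k! = {theory:.4f}")
\end{verbatim}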
\section{Complexity and implementation optimization}

The NFBP algorithm has linear complexity $ O(n) $, as we only need to iterate
@@ -140,10 +240,10 @@ store the current sum and the current sum of squares for different variables.
While the mean can easily be calculated by summing then dividing, the variance
can be calculated using the following formula:

\begin{align*}
{S_N}^2 & = \frac{1}{N-1} \sum_{i=1}^{N} (X_i - \overline{X})^2 \\
& = \frac{1}{N-1} \sum_{i=1}^{N} X_i^2 - \frac{N}{N-1} \overline{X}^2
\end{align*}

The sum $ \frac{1}{N-1} \sum_{i=1}^{N} X_i^2 $ can be calculated iteratively
after each simulation.
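
For illustration, such a running accumulator might look as follows; this is a
minimal sketch, not the benchmarked implementation from Annex \ref{annex:performance}:

\begin{verbatim}
class RunningStats:
    # Keep a running sum and sum of squares so that the mean and the
    # sample variance can be read off at any time without storing
    # the individual values.
    def __init__(self):
        self.n = 0
        self.total = 0.0
        self.total_sq = 0.0

    def add(self, x):
        self.n += 1
        self.total += x
        self.total_sq += x * x

    def mean(self):
        return self.total / self.n

    def variance(self):
        # S_N^2 = (1/(N-1)) * sum(X_i^2) - (N/(N-1)) * mean^2
        n, m = self.n, self.mean()
        return self.total_sq / (n - 1) - n / (n - 1) * m * m
\end{verbatim}

Each simulation result is passed to \texttt{add}, and the statistics are read
off at the end in constant additional memory.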
@@ -155,7 +255,6 @@ the above formulae to calculate the mean and variance of $ N = 10^6 $ random
numbers. We wrote the following algorithms\footnotemark:
\footnotetext{The full code used to measure performance can be found in Annex \ref{annex:performance}.}
% TODO annex
\paragraph{Intuitive algorithm} Store values first, calculate later
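
A hypothetical sketch of this approach (the code actually measured is the one in
Annex \ref{annex:performance}): every value is stored, and the mean and variance
are computed in a second pass.

\begin{verbatim}
import random

# "Store values first, calculate later": keep every value in memory,
# then compute the mean and the sample variance over the stored list.
values = [random.random() for _ in range(10**6)]

n = len(values)
mean = sum(values) / n
variance = sum((x - mean) ** 2 for x in values) / (n - 1)
print(mean, variance)
\end{verbatim}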