diff --git a/Article/Fig_auto/dssDetailNew.png b/Article/Fig_auto/dssDetailNew.png new file mode 100644 index 0000000000000000000000000000000000000000..0fd7e34e7e520b45c8131d6d4b06d6b3aa8cd717 Binary files /dev/null and b/Article/Fig_auto/dssDetailNew.png differ diff --git a/Article/Fig_auto/dssDetailOld.png b/Article/Fig_auto/dssDetailOld.png new file mode 100644 index 0000000000000000000000000000000000000000..f8a8e770642ebd3f871c6c8cec08709d0e84fe53 Binary files /dev/null and b/Article/Fig_auto/dssDetailOld.png differ diff --git a/Article/Fig_auto/dssNew.png b/Article/Fig_auto/dssNew.png new file mode 100644 index 0000000000000000000000000000000000000000..6b8233b2f242e1a74ca82784a8af88b19c81c34a Binary files /dev/null and b/Article/Fig_auto/dssNew.png differ diff --git a/Article/Fig_auto/dssOld.png b/Article/Fig_auto/dssOld.png new file mode 100644 index 0000000000000000000000000000000000000000..65adc03248eb2c501ba55e3a6e63b9786e0932d5 Binary files /dev/null and b/Article/Fig_auto/dssOld.png differ diff --git a/Article/Fig_method/workflow.tex b/Article/Fig_method/workflow.tex index 77d18c879fd4c74629df15a123b3a43e01d7e14f..6e9900cc20d98230e82758173cba09e140808227 100755 --- a/Article/Fig_method/workflow.tex +++ b/Article/Fig_method/workflow.tex @@ -10,7 +10,7 @@ %\put(102,0){\framebox(56,30)} \multiput(102,15)(28,9){2}{\line(3,-1){28}} \multiput(102,15)(28,-9){2}{\line(3,1){28}} - \put(100,0){\makebox(60,30){Valid ?}} + \put(100,0){\makebox(60,28){Valid ?}} \put(133,-2){\scriptsize $\emptyset$} \put(130,6){\vector(0,-1){10}} \put(159,18){\scriptsize $(C,\vec{D})$} @@ -20,7 +20,12 @@ \put(186,4){\makebox(60,10){tracking}} \put(250,18){\scriptsize $\mathcal{B}'$} \put(242,15){\vector(1,0){24}} - \put(266,0){\framebox(56,30){Filtering}} - \put(330,18){\scriptsize $\mathcal{B}''$} +% \put(266,0){\framebox(56,30){Filtering}} + \multiput(266,15)(28,9){2}{\line(3,-1){28}} + \multiput(266,15)(28,-9){2}{\line(3,1){28}} + \put(264,0){\makebox(60,28){Accept ?}} + 
\put(297,-2){\scriptsize $\emptyset$} + \put(294,6){\vector(0,-1){10}} + \put(330,18){\scriptsize $\mathcal{B}'$} \put(322,15){\vector(1,0){22}} \end{picture} diff --git a/Article/expe.tex b/Article/expe.tex index 49090bd14587b139dca1365637f8fdaeae58f6e1..5a1009a3e8366bb7e4c11cd31501dafb5b19426a 100755 --- a/Article/expe.tex +++ b/Article/expe.tex @@ -2,17 +2,87 @@ \label{sec:expe} -The evaluation stage aims at quantifying the advantages of the new detector -compared to the former one. +The main goal of this work is to provide straight segments with a quality +indication through the associated width parameter. +In lack of available reference tool, the evaluation stage mostly aims +at quantifying the advantages of the new detector compared to the previous +detector in unsupervised context. For a fair comparison, the process flow of the former method (the initial detection followed by two refinement steps) is integrated as an option into the code of the new detector, so that both methods rely on the same optimized basic routines. -\input{expeSynthese} +The first test (\RefFig{fig:synth}) compares the performance of both +detectors on a set of 1000 synthesized images containing 10 randomly +placed input segments with random width between 1 and 4 pixels. +Altough this perfect world context with low gradient noise tends to soften +the old detector weaknesses, the results of \RefTab{tab:synth} show slightly +better width and angle measurements on long segments for the new detector. +The new detector generates more small segments that degrade the angle +estimations, but it produces a smaller amount of false detections and +succeeds in finding most of the input segments. -\input{expeHard} +The second test (\RefFig{fig:hard}) visually compares the results of +both detectors on quite difficult images, even for other detectors from +the literature, with a lot of gradient noise. 
+The new detector provides fewer outliers and misaligned segments, and
+globally more relevant information to infer the structure of the brick wall.
+\begin{figure}[h]
+%\center
+  \begin{tabular}{
+    c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c}
+    \includegraphics[width=0.19\textwidth]{Fig_synth/statsExample.png} &
+    \includegraphics[width=0.19\textwidth]{Fig_synth/statsoldPoints.png} &
+    \includegraphics[width=0.19\textwidth]{Fig_synth/statsoldBounds.png} &
+    \includegraphics[width=0.19\textwidth]{Fig_synth/statsnewPoints.png} &
+    \includegraphics[width=0.19\textwidth]{Fig_synth/statsnewBounds.png}
+    \begin{picture}(1,1)
+    \put(-310,0){a)}
+    \put(-240,0){b)}
+    \put(-170,0){c)}
+    \put(-100,0){d)}
+    \put(-30,0){e)}
+    \end{picture}
+  \end{tabular}
+  \caption{Evaluation on synthesized images:
+           a) one of the test images,
+           b) output blurred segments from the old detector and
+           c) their enclosing digital segments,
+           d) output blurred segments from the new detector and
+           e) their enclosing digital segments.}
+  \label{fig:synth}
+\end{figure}
+\begin{table}
+\centering
+\input{Fig_synth/statsTable}
+\caption{Measured performance of both detectors on a set of synthesized images.
+$S$ is the set of all the input segments, +$D$ the set of all the detected blurred segments.} +\label{tab:synth} +\end{table} +\begin{figure} +%\center + \begin{tabular}{ + c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}} + \includegraphics[width=0.32\textwidth]{Fig_method/parpaings.png} & + \includegraphics[width=0.32\textwidth]{Fig_hard/hardOld.png} & + \includegraphics[width=0.32\textwidth]{Fig_hard/hardNew.png} + \begin{picture}(1,1) + {\color{dwhite}{ + \put(-286,4.5){\circle*{8}} + \put(-171,4.5){\circle*{8}} + \put(-58,4.5){\circle*{8}} + }} + \put(-288.5,2){a} + \put(-173.5,2){b} + \put(-60.5,2){c} + \end{picture} + \end{tabular} + \caption{Detection results on quite textured images: + one of the tested images (a), + the segments found by the old detector (b) + and those found by the new detector (c).} + \label{fig:hard} +\end{figure} \input{expeAuto} - -%\input{expeOld} diff --git a/Article/expeAuto.tex b/Article/expeAuto.tex index 458972126a09a22862af9483b9420bb2b2216c3f..c5a4948ff3f14927e5e9d297dd69d7f902aa2644 100755 --- a/Article/expeAuto.tex +++ b/Article/expeAuto.tex @@ -1,43 +1,49 @@ -The third series of tests aims at evaluating the performance of the new +The third series of tests aim at evaluating the performance of the new detector wrt the previous one on a selection of more standard images. Compared measures $M$ are the execution time $T$, the amount $N$ of detected blurred segments, the amount $N'$ of long (larger than 40 pixels) segments, the mean length $L$ and the mean width $W$ of the detected segments. -In order to be objective, these results are also compared to the same +For the sake of objectivity, these results are also compared to the same measurements made on the image data base used for the CannyLine line segment detector \cite{LuAl15}. The table \RefTab{tab:auto} gives the measures obtained on one of the selected images (\RefFig{fig:auto}) and the result of a systematic test on the whole CannyLine data base. 
-The new detector is faster and provides more segments. -These segments are mainly shorter and thinner. -The control of the assigned width to fit to the detected segment width -has the side effect of blocking the segment expansion when the remote parts -are more noisy. -The relevance of this behavior depends strongly on application requirements. \begin{figure}[h] %\center \begin{tabular}{ c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}} \includegraphics[width=0.32\textwidth]{Fig_auto/buro.png} & \includegraphics[width=0.32\textwidth]{Fig_auto/autoOld.png} & - \includegraphics[width=0.32\textwidth]{Fig_auto/autoNew.png} + \includegraphics[width=0.32\textwidth]{Fig_auto/autoNew.png} \\ + & \includegraphics[width=0.22\textwidth]{Fig_auto/dssDetailOld.png} & + \includegraphics[width=0.22\textwidth]{Fig_auto/dssDetailNew.png} \begin{picture}(1,1) + {\color{red}{ + \put(-21.5,38){\framebox(18,8)} + \put(-12.5,38){\vector(-2,-1){25}} + \put(-135.5,38){\framebox(18,8)} + \put(-124.5,38){\vector(-2,-1){25}} + }} {\color{dwhite}{ - \put(-286,4.5){\circle*{8}} - \put(-171,4.5){\circle*{8}} - \put(-58,4.5){\circle*{8}} + \put(-287,37.5){\circle*{8}} + \put(-172,37.5){\circle*{8}} + \put(-59,37.5){\circle*{8}} + \put(-186,5.5){\circle*{8}} + \put(-73,5.5){\circle*{8}} }} - \put(-288.5,2){a} - \put(-173.5,2){b} - \put(-60.5,2){c} + \put(-289.5,35){a} + \put(-174.5,35){b} + \put(-61.5,35){c} + \put(-189,3){d} + \put(-75.5,3){e} \end{picture} \end{tabular} - \caption{Automatic detection of segments: - a) input image, - b) results of the old detector, - c) results of the new detector.} + \caption{Automatic detection on standard images: + an input image (a), the segments found by the old detector (b) + and those found by the new detector (c), and a detail of the + enclosing digital segments for both old (d) and new (e) detectors.} \label{fig:auto} \end{figure} \begin{table} @@ -48,3 +54,22 @@ The relevance of this behavior depends strongly on application requirements. 
the previous (resp. new) detector.} \label{tab:auto} \end{table} + +The new detector is faster and provides more blurred segments than the +previous one. +The details of \RefFig{fig:auto} d) and e) illustrate the improved +accuracy obtained. +The output segments are thinner but also shorter. +The control of the assigned width to fit to the detected segment width +has the side effect of blocking the segment expansion when the remote parts +are more noisy. +The relevance of this behavior depends strongly on application requirements. +Therefore the control of the assigned width is left as an option the user +can let or cancel. +In both case, it could be interesting to combine the detector with a tool +to merge aligned segments. + +%Although these observations in unsupervised context should be reproduced +%in supervised context, similar experiments require an application context +%to dispose of a ground truth and of real users to assess the detector +%relevance through ergonomy evaluations. diff --git a/Article/expeHard.tex b/Article/expeHard.tex index d7ff5921a24e948649cafe5094c80b56b5e6ca9f..087e0dfc0552aba5a9b6b79abe40b5df768d6374 100755 --- a/Article/expeHard.tex +++ b/Article/expeHard.tex @@ -22,8 +22,8 @@ globally more relevant informations to infere the structure of the brick wall. \end{picture} \end{tabular} \caption{Detection results on quite textured images: - 1) one of the tested images, - 2) the segments found by the old detector, - 3) and the one found by the new detector.} + one of the tested images (a), + the segments found by the old detector (b) + and those found by the new detector (c).} \label{fig:hard} \end{figure} diff --git a/Article/expeSynthese.tex b/Article/expeSynthese.tex index 26c512f19d7ebd839eb121d3c9aea94edf65a5e0..3270a4e966e1fa1b6c03529106ee8dbe1afd9e0c 100755 --- a/Article/expeSynthese.tex +++ b/Article/expeSynthese.tex @@ -27,17 +27,17 @@ succeeds in finding most of the input segments. 
\end{tabular} \caption{Evaluation on synthesized images: a) one of the test images, - b) output segments points from the old detector and - c) their minimal digital straight segments, - d) output segments points from the new detector and - e) their minimal digital straight segments.} + b) output blurred segments from the old detector and + c) their enclosing digital segments, + d) output blurred segments from the new detector and + e) their enclosing digital segments.} \label{fig:synth} \end{figure} \begin{table} \centering \input{Fig_synth/statsTable} \caption{Measured performance of both detectors on a set of synthesized images. -$S$ is the set of all the input segments pixels, -$D$ the set of all the detected segments pixels.} +$S$ is the set of all the input segments, +$D$ the set of all the detected blurred segments.} \label{tab:synth} \end{table} diff --git a/Article/intro.tex b/Article/intro.tex index fb5fa085d31616b76eec631bce0c41946f6d5cf8..0222bafc54aacf561e2047d3a62c5ca97400d5d3 100755 --- a/Article/intro.tex +++ b/Article/intro.tex @@ -34,11 +34,9 @@ or at least quite few values with intuitive meaning. In a former paper \cite{KerautretEven09}, an efficient tool to detect blurred segments of fixed width in gray-level images was already introduced. -It is based on a first rough detection in a local area -of the image either defined by the user in supervised context or blindly -explored in automatic mode. The goal is to disclose the presence of an edge. -Therefore a simple test as the gradient maximal value is performed. - +It is based on a first rough detection in a local image area +defined by the user. The goal is to disclose the presence of a straight edge. +Therefore an as simple test as the gradient maximal value is performed. In case of success, refinement steps are run through an exploration of the image in the direction of the detected edge. 
In order to prevent local disturbances such as the presence of a sharper @@ -57,22 +55,20 @@ user according to the application requirements. The produced information on the edge quality is rather poor, and especially when the edge is thin, the risk to incorporate outlier points is quite high, thus producing a biased estimation of the edge orientation. - Then, two refinement steps are systematically run. On one hand, this is useless when the first detection is successfull. On the other hand, there is no guarantee that this approach is able to process larger images. The search direction relies on the support vector of the blurred segment -detected at the former step, and the numerization rounding fixes a limit -on this estimated orientation accuracy. -It results that more steps would inevitably be necessary to process higher -resolution images. +detected at the former step. +Because the numerization rounding fixes a limit on this estimated orientation +accuracy, more steps are inevitably necessary to process larger images. \subsection{Main contributions} The work presented in this paper aims at solving both former mentioned drawbacks through two main contributions: -(i) the concept of adaptive directional scanner designed to get some +(i) the concept of adaptive directional scan designed to get some compliance to the unpredictable orientation problem; (ii) the control of the assigned width to the blurred segment recognition algorithm, intended to derive more reliable information on the edge @@ -84,9 +80,9 @@ which can be evaluated through an online demonstration. In the next section, the main theoretical notions this work relies on are introduced. -The new detector workflow, the adaptive directional scanner, the control +The new detector workflow, the adaptive directional scan, the control of the assigned with and their integration into both supervised and -unsupervised contexts are then presented and discussed in \RefSec{sec:method}. 
+unsupervised contexts are then presented in \RefSec{sec:method}. Experiments led to assess the expected increase of performance are decribed in \RefSec{sec:expe}. Finally, achieved results are summarized in \RefSec{sec:conclusion}, diff --git a/Article/method.tex b/Article/method.tex index 2371979aec470d17f682c4b8d15ac4393f64294c..61dee6cf217b25fe498ae7e297289a773886a164 100755 --- a/Article/method.tex +++ b/Article/method.tex @@ -4,8 +4,7 @@ \subsection{Workflow of the detection process} -The workflow of the blurred segment detection process is summerized -in the following figure. +The workflow of the detection process is summerized in the following figure. \begin{figure}[h] \center @@ -16,11 +15,11 @@ in the following figure. The initial detection consists in building and extending a blurred segment $\mathcal{B}$ based on the highest gradient points found in each scan -of a static directional scanner based on an input segment $AB$. +of a static directional scan based on an input segment $AB$. Validity tests are then applied to decide of the detection poursuit. They aim at rejecting a too short or too sparse blurred segment, or a -blurred segment with a close orientation to the input segment $AB$. +blurred segment with a too close orientation to the input segment $AB$. In case of positive response, the position $C$ and direction $\vec{D}$ of this initial blurred segment are extracted. @@ -29,13 +28,17 @@ $\mathcal{B}'$ based on points that correspond to local maxima of the image gradient, ranked by magnitude order, and with gradient direction close to a reference gradient direction at the segment first point. At this refinement step, a control of the assigned width is applied -and an adaptive directional scanner based on the found position $C$ and +and an adaptive directional scan based on the found position $C$ and direction $\vec{D}$ is used in order to extends the segment in the appropriate direction. 
These two improvements are described in the following sections. -The fine track output segment is finally filtered to remove artifacts -and outliers, and a final blurred segment $\mathcal{B}''$ is provided. +The output segment $\mathcal{B}'$ is finally tested according to the +application needs. Too short, too sparse ot too fragmented segments +can be rejected. Length, sparsity or fragmentation thresholds are +intuitive parameters left at the end user disposal. +None of these tests are activated for the experimental stage in order +to put forward achievable perfomance. \subsection{Adaptive directional scan} @@ -53,125 +56,75 @@ is subject to the numerization rounding, and the longer the real segment to detect, the higher the probability to fail again on a blurred segment escape from the directional scan. -%Even in ideal situation where the detected segment is a perfect line, -%its width is never null as a result of the discretization process. -%The estimated direction accuracy is mostly constrained by the length of -%the detected segment. -%To avoid these side escapes, the scan should not be a linear strip but -%rather a conic shape to take into account the blurred segment preimage. -%This side shift is amplified in situations where the blurred segment is -%left free to get thicker in order to capture possible noisy features. -%The assigned width is then still greater than the detected minimal width, -%so that the segment can move within the directional scan. -%Knowing the detected blurred segment shape and the image size, it is -%possible to define a conic scan area, but this solution is computationaly -%expensive because it leads to useless exploration of large image areas. 
-% -%\begin{figure}[h] -%\center -% %\begin{picture}(300,40) -% %\end{picture} -% \input{Fig_notions/bscone} -% \caption{Possible extension area based -% on the detected blurred segment preimage.} -% \label{fig:cone} -%\end{figure} +\begin{figure}[h] +\center + \begin{tabular}{c@{\hspace{0.2cm}}c} + \includegraphics[width=0.48\textwidth]{Fig_notions/escapeFirst_zoom.png} & + \includegraphics[width=0.48\textwidth]{Fig_notions/escapeSecond_zoom.png} \\ + \multicolumn{2}{c}{ + \includegraphics[width=0.62\textwidth]{Fig_notions/escapeThird_zoom.png}} + \begin{picture}(1,1)(0,0) + {\color{dwhite}{ + \put(-260,87.5){\circle*{8}} + \put(-86,87.5){\circle*{8}} + \put(-172,7.5){\circle*{8}} + }} + \put(-263,85){a} + \put(-89,85){b} + \put(-175,5){c} + \end{picture} + \end{tabular} + \caption{Aborted detections on side escapes of static directional scans + and successful detection using an adaptive directional scan. + The last points added to the left of the blurred segment during + initial detection (a) lead to a bad estimation of its + orientation, and thus to an incomplete fine detection with a + classical directional scan (b). An adaptive directional scan + can continue the segment expansion as far as necessary (c). + On the pictures, the input selection is drawn in red color, + the scan strip bounds + in blue and the detected blurred segment in green.} + \label{fig:escape} +\end{figure} To overcome this issue, in the former work, an additional refinement step is run using the better orientation estimated from the longer segment obtained. It is enough to completely detect most of the tested edges, but certainly not all, especially if larger images with much longer edges are processed. -%The solution implemented in the former work was to let some arbitrary -%margin between the scan strip width and the assigned width to the detection, -%and to perform two fine detection steps, using for each of them the direction -%found at the former step. 
As a solution, this operation could be itered as long as the blurred segment -escapes from the directional scanner using as any fine detection steps as +escapes from the directional scan using as any fine detection steps as necessary. But at each iteration, already tested points are processed again, thus producing a useless computational cost. Here the proposed solution is to dynamically align the scan direction on -the blurred segment one all along the expansion stage. +the blurred segment all along the expansion stage. At each iteration $i$ of the expansion, the scan strip is aligned on the direction of the blurred segment $\mathcal{B}_{i-1}$ computed at previous iteration $i-1$. More generally, an adaptive directional scan $ADS$ is defined by: \begin{equation} -%S_i = \mathcal{D}_{i-1} \cap \mathcal{N}_i ADS = \left\{ S_i = \mathcal{D}_i \cap \mathcal{N}_i \cap \mathcal{I} \left| \begin{array}{l} \delta(\mathcal{N}_i) = - \delta^{-1}(\mathcal{D}_0) \\ -\wedge~ h_0(\mathcal{N}_i) = h_0(\mathcal{N}_{i-1}) + p(\mathcal{D}) \\ +\wedge~ h(\mathcal{N}_i) = h(\mathcal{N}_{i-1}) + p(\mathcal{D}) \\ \wedge~ \mathcal{D}_{i} = \mathcal{D} (C_{i-1}, \vec{D}_{i-1}, w_{i-1}), i > 1 -%\wedge~ \mathcal{D}_{i} = D (\mathcal{B}_{i-1},\varepsilon + k), i > 1 \end{array} \right. \right\} \end{equation} -%where $D (\mathcal{B}_i,w)$ is the scan strip aligned to the -%detected segment at iteration $i$ with width $w$. -%In practice, the scan width is set a little greater than the assigned -%width $\varepsilon$ ($k$ is a constant arbitrarily set to 4). where $C_{i-1}$, $\vec{D}_{i-1}$ and $w_{i-1}$ are a position, a director vector and a width observed at iteration $i-1$. 
In the scope of the present detector, $C_{i-1}$ is the intersection of the input selection and the medial axis of $\mathcal{B}_{i-1}$, -$\vec{D}_{i-1}$ the support vector of the narrowest digital straight line -that contains $\mathcal{B}_{i-1}$, -and $w_{i-1}$ a value slightly greater than the minimal width of -$\mathcal{B}_{i-1}$. +$\vec{D}_{i-1}$ the support vector of the enclosing digital segment +$E(\mathcal{B}_{i-1})$, and $w_{i-1}$ a value slightly greater than the +minimal width of $\mathcal{B}_{i-1}$. So the last clause expresses the update of the scan bounds at iteration $i$. Compared to static directional scans, the scan strip moves while scan lines remain fixed. This behavior ensures a complete detection of the blurred segment even when the orientation is badly estimated (\RefFig{fig:escape} c). -\begin{figure}[h] -\center - \begin{tabular}{c@{\hspace{0.2cm}}c} - \includegraphics[width=0.48\textwidth]{Fig_notions/escapeFirst_zoom.png} & - \includegraphics[width=0.48\textwidth]{Fig_notions/escapeSecond_zoom.png} \\ - \multicolumn{2}{c}{ - \includegraphics[width=0.72\textwidth]{Fig_notions/escapeThird_zoom.png}} - \begin{picture}(1,1)(0,0) - {\color{dwhite}{ - \put(-260,100.5){\circle*{8}} - \put(-86,100.5){\circle*{8}} - \put(-172,7.5){\circle*{8}} - }} - \put(-263,98){a} - \put(-89,98){b} - \put(-175,5){c} - \end{picture} - \end{tabular} - \caption{Aborted detections on side escapes of static directional scans - and successful detection using an adaptive directional scan. - The last points added to the left of the blurred segment during - the initial detection (a) lead to a bad estimation of its - orientation, and thus to an incomplete fine detection with - a classical directional scanner (b). This scanner is - advantageously replaced by an adaptive directional scanner - able to continue the segment expansion as far as necessary (c). 
- The input selection is drawn in red color, the scan strip bounds - in blue and the detected blurred segment in green.} - \label{fig:escape} -\end{figure} - -%\begin{figure}[h] -%\center -% \begin{tabular}{c@{\hspace{0.2cm}}c} -% \includegraphics[width=0.49\textwidth]{Fig_notions/adaptionBounds_zoom.png} -% & \includegraphics[width=0.49\textwidth]{Fig_notions/adaptionLines_zoom.png} -% \end{tabular} -% \caption{Example of blurred segment detection -% using an adaptive directional scan. -% On the right picture, the scan bounds are displayed in red, the -% detected blurred segment in blue, and its bounding lines in green. -% The left picture displays the successive scans. -% Here the adaption is visible at the crossing of the tile joins.} -% \label{fig:adaption} -%\end{figure} - \subsection{Control of the assigned width} The assigned width $\varepsilon$ to the blurred segment recognition algorithm @@ -278,55 +231,6 @@ In edge selection mode (\RefFig{fig:edgeDir} b), the multi-detection algorithm is executed twice, first in main edge selection mode, then in opposite edge selection mode. -%Beyond the possible detection of a large set of edges at once, the -%multi-detection allows the detection of some unaccessible edges in -%classical single detection mode. This is particularly the case of edges -%that are quite close to a more salient edge with a higher gradient, -%as illustrated in \RefFig{fig:voisins}. -%The multi-detection detects both edges and the user may then select -%the awaited one. 
-% -%\begin{figure}[h] -%\center -% \begin{tabular}{c@{\hspace{0.2cm}}c@{\hspace{0.2cm}}c@{\hspace{0.2cm}}c} -% \includegraphics[width=0.22\textwidth]{Fig_method/voisinImage_zoom.png} & -% \includegraphics[width=0.22\textwidth]{Fig_method/voisinGrad_zoom.png} & -% \includegraphics[width=0.22\textwidth]{Fig_method/voisinSingle_zoom.png} & -% \includegraphics[width=0.22\textwidth]{Fig_method/voisinMulti_zoom.png} \\ -% \parbox{0.22\textwidth}{\centering{\scriptsize{a)}}} & -% \parbox{0.22\textwidth}{\centering{\scriptsize{b)}}} & -% \parbox{0.22\textwidth}{\centering{\scriptsize{c)}}} & -% \parbox{0.22\textwidth}{\centering{\scriptsize{d)}}} -% \end{tabular} -% \caption{Detection of close edges with different sharpness: -% a) input selection across the edges, -% b) gradient map, -% c) in single mode, detection of only the edge with the higher gradient, -% d) in multi-detection mode, detection of both edges. } -% \label{fig:voisins} -%\end{figure} - -%This detection procedure can be used to detect as well straight edges -%as thin straight objects. In the first case, the gradient vectors of all -%edge points are assumed to be oriented in the same direction. But if the -%sign of the gradient direction is not considered, points with gradient in -%opposite directions are merged to build the same blurred segment, allowing -%the detection of both edges of a thin linear structure, like for instance -%the tile joins of \RefFig{fig:edgeDir}. - -%On that example, when a straight feature detection is run -%(\RefFig{fig:edgeDir} a), -%a thick blurred segment which extends up to four tiles is provided. -%When a straight edge detection is run, a very thin blurred segment is -%built to follow only one join edge. -%The multi-detection can also be applied to both thin object or edge detection. 
-%In the latter case, the detection algorithm is run twice using opposite -%directions, so that in the exemple of figure (\RefFig{fig:edgeDir} b), -%both edges (in different colors) are highlighted. -%These two thin blurred segments are much shorter, probably because the -%tiles are not perfectly aligned. -%This example illustrates the versatility of the new detector. - \subsection{Automatic blurred segment detection} An unsupervised mode is also proposed to automatically detect all the @@ -339,38 +243,6 @@ In the present work, the stroke sweeping step $\delta$ is set to 10 pixels. \input{Fig_method/algoAuto} -%\RefFig{fig:evalAuto}b gives an idea of the automatic detection performance. -%In the example of \RefFig{fig:noisy}, hardly perceptible edges are detected -%despite of a quite textured context. -%Unsurpringly the length of the detected edges is linked to the initial -%value of the assigned width, but a large value also augments the rate -%of interfering outliers insertion. -% -%\begin{figure}[h] -%\center \begin{tabular}{c@{\hspace{0.1cm}}c@{\hspace{0.1cm}}c} -% \includegraphics[width=0.32\textwidth]{Fig_method/parpaings.png} & -% \includegraphics[width=0.32\textwidth]{Fig_method/parpaings2.png} & -% \includegraphics[width=0.32\textwidth]{Fig_method/parpaings3.png} -% \end{tabular} -% \begin{picture}(1,1)(0,0) -% {\color{dwhite}{ -% \put(-286,-25.5){\circle*{8}} -% \put(-171,-25.5){\circle*{8}} -% \put(-58,-25.5){\circle*{8}} -% }} -% \put(-288.5,-28){a} -% \put(-173.5,-28){b} -% \put(-60.5,-28){c} -% \end{picture} -% \caption{Automatic detection of blurred segments on a textured image. 
-% a) the input image, -% b) automatic detection result with initial assigned width set -% to 3 pixels, -% c) automatic detection result with initial assigned width set -% to 8 pixels.} -% \label{fig:noisy} -%\end{figure} - The automatic detection of blurred segments in a whole image is left available for testing in an online demonstration at the following address: \\ \href{http://ipol-geometry.loria.fr/~kerautre/ipol_demo/AdaptDirBS_IPOLDemo}{ diff --git a/Article/notions.tex b/Article/notions.tex index 00d917d5f802d2a3135eaa4ea441c5578dbcff2c..06b6ed5cf107e2ec7b0874719690026eb9d12371 100755 --- a/Article/notions.tex +++ b/Article/notions.tex @@ -16,8 +16,7 @@ $0 \leq ax + by - c < \nu$. In the following, we note $\delta(\mathcal{L}) = b/a$ the slope of digital line $\mathcal{L}$, $w(\mathcal{L}) = \nu$ its arithmetical width, -$h_P(\mathcal{L}) = c - ax - by$ its {\it shift} to point $P(x,y)$, -$h_0(\mathcal{L}) = c$ its {\it shift} to origin, and +$h(\mathcal{L}) = c$ its {\it shift} to origin, and $p(\mathcal{L}) = max(|a|,|b|)$ its period (i.e. the length of its periodic pattern). When $\nu = p(\mathcal{L})$, then $\mathcal{L}$ is the narrowest 8-connected @@ -25,8 +24,8 @@ line and is called a naive line. \begin{definition} A blurred segment $\mathcal{B}$ of assigned width $\varepsilon$ is a set -of points in $\mathbb{Z}^2$ that all belong to a digital line of -arithmetical width $\nu = \varepsilon$. +of points in $\mathbb{Z}^2$ that all belong to a digital line $\mathcal{L}$ +of arithmetical width $w(\mathcal{L}) = \varepsilon$. \end{definition} A linear-time algorithm to recognize a blurred segment of assigned width @@ -36,8 +35,10 @@ segment when adding each point $P_i$ successively. The minimal width $\mu$ of the blurred segment $\mathcal{B}$ is the arithmetical width of the narrowest digital straight line that contains $\mathcal{B}$. 
-It is also the minimal width of the convex hull of $\mathcal{B}$, -that can be computed by Melkman's algorithm \cite{Melkman87}. +%It is also the minimal width of the convex hull of $\mathcal{B}$, +%that can be computed by Melkman's algorithm \cite{Melkman87}. +The enclosing digital segment $E(\mathcal{B})$ is the section of this +optimal digital straight line bounded by the end points of $\mathcal{B}$. As depicted on \RefFig{fig:bs}, the extension of the blurred segment $\mathcal{B}_{i-1}$ of assigned width $\varepsilon$ and minimal width $\mu_{i-1}$ at step $i-1$ with a new input @@ -49,24 +50,28 @@ point $P_i$ is thus controlled by the recognition test $\mu_i < \varepsilon$. \caption{A growing blurred segment $\mathcal{B}_i$ : when adding the new point $P_i$, the blurred segment minimal width augments from $\mu_{i-1}$ to $\mu_i$; if the new width $\mu_i$ exceeds -the assigned width $\varepsilon$, then the new input point is rejected.} +the assigned width $\varepsilon$, then the new input point is rejected +and $\mathcal{B}_i = \mathcal{B}_{i-1}$.} \label{fig:bs} \end{figure} -Associated to this primitive, the following definition of directional scan -also based on digital straight lines is also used in the following. +Associated to this primitive, the following definition of a directional scan +also based on digital straight lines is also used in this work. \subsection{Directional scan} +\begin{definition} A directional scan $DS$ is an ordered partition restricted to the image domain $\mathcal{I}$ of a digital straight line $\mathcal{D}$, called the {\it scan strip}, into scans $S_i$, each of them being a segment of a naive line $\mathcal{N}_i$ orthogonal to $\mathcal{D}$. 
+\end{definition} + \begin{equation} DS = \left\{ S_i = \mathcal{D} \cap \mathcal{N}_i \cap \mathcal{I} \left| \begin{array}{l} \delta(\mathcal{N}_i) = - \delta^{-1}(\mathcal{D}) \\ -\wedge~ h_0(\mathcal{N}_i) = h_0(\mathcal{N}_{i-1}) + p(\mathcal{D}) +\wedge~ h(\mathcal{N}_i) = h(\mathcal{N}_{i-1}) + p(\mathcal{D}) \end{array} \right. \right\} %S_i = \mathcal{D} \cap \mathcal{N}_i, \mathcal{N}_i \perp \mathcal{D} \end{equation} @@ -104,8 +109,8 @@ At each iteration $i$, the scans $S_i$ and $S_{-i}$ are successively processed. \put(-54,32.5){\circle*{14}} \put(-161,10.5){\circle*{20}} }} - \put(-185,112){$A$} - \put(-88,14){$B$} + \put(-88,13.5){$A$} + \put(-185,111.5){$B$} \put(-20,98){$\mathcal{D}$} \put(-137,64){\color{blue}{$S_0$}} \put(-77,94){\color{red}{$S_8$}} @@ -146,8 +151,10 @@ $\mathcal{N}_i^{A,B}$: A directional scan can also be defined by its central point $C(x_C,y_C)$, its direction $\vec{D}(X_D,Y_D)$ and its width $w$. If we note -$c_3 = x_C\cdot Y_D - y_C\cdot X_D$ and -$c_4 = X_D\cdot x_C + Y_D\cdot y_C$, it is then defined by +$c_3 = x_C\cdot Y_D - y_C\cdot X_D$, +$c_4 = X_D\cdot x_C + Y_D\cdot y_C$, +$\nu_{\vec{D}} = max (|X_D|,|Y_D|)$, +it is then defined by the following scan strip $\mathcal{D}^{C,\vec{D},w}$ and scan lines $\mathcal{N}_i^{C,\vec{D},w}$: \begin{equation} @@ -155,6 +162,6 @@ $\mathcal{N}_i^{C,\vec{D},w}$: \mathcal{D}^{C,\vec{D},w} = \mathcal{L}(Y_D,~ -X_D,~ c_3 - w / 2,~ w) \\ \mathcal{N}_i^{C,\vec{D},w} = \mathcal{L}(X_D,~ Y_D,~ - c_4 - w / 2 + i\cdot w,~ max (|X_D|,|Y_D|) + c_4 - w / 2 + i\cdot w,~ \nu_{\vec{D}} \end{array} \right. \end{equation} diff --git a/Code/Seg/BlurredSegment/bstracker.cpp b/Code/Seg/BlurredSegment/bstracker.cpp index cd364ecbdb7d12aa185768b9be36dfc760fd31b7..798b49a2b680bea7ed840fe7be7c659b4de6bf9d 100755 --- a/Code/Seg/BlurredSegment/bstracker.cpp +++ b/Code/Seg/BlurredSegment/bstracker.cpp @@ -449,5 +449,3 @@ void BSTracker::switchScanExtent () maxScan = (maxScan == gMap->getHeightWidthMax () ? 
DEFAULT_MAX_SCAN : gMap->getHeightWidthMax ()); } - -