diff --git a/Article/Expe_perf/PerfCompare.class b/Expes/Analyses/CompDetectors/PerfCompare.class
similarity index 100%
rename from Article/Expe_perf/PerfCompare.class
rename to Expes/Analyses/CompDetectors/PerfCompare.class
diff --git a/Article/Expe_perf/PerfCompare.java b/Expes/Analyses/CompDetectors/PerfCompare.java
similarity index 100%
rename from Article/Expe_perf/PerfCompare.java
rename to Expes/Analyses/CompDetectors/PerfCompare.java
diff --git a/Article/Expe_perf/perf_iut_amphi1.txt b/Expes/Analyses/CompDetectors/perf_iut_amphi1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_amphi1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_amphi1.txt
diff --git a/Article/Expe_perf/perf_iut_amphi2.txt b/Expes/Analyses/CompDetectors/perf_iut_amphi2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_amphi2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_amphi2.txt
diff --git a/Article/Expe_perf/perf_iut_amphi3.txt b/Expes/Analyses/CompDetectors/perf_iut_amphi3.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_amphi3.txt
rename to Expes/Analyses/CompDetectors/perf_iut_amphi3.txt
diff --git a/Article/Expe_perf/perf_iut_corridor1.txt b/Expes/Analyses/CompDetectors/perf_iut_corridor1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_corridor1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_corridor1.txt
diff --git a/Article/Expe_perf/perf_iut_corridor2.txt b/Expes/Analyses/CompDetectors/perf_iut_corridor2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_corridor2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_corridor2.txt
diff --git a/Article/Expe_perf/perf_iut_corridor3.txt b/Expes/Analyses/CompDetectors/perf_iut_corridor3.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_corridor3.txt
rename to Expes/Analyses/CompDetectors/perf_iut_corridor3.txt
diff --git a/Article/Expe_perf/perf_iut_entrance.txt b/Expes/Analyses/CompDetectors/perf_iut_entrance.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_entrance.txt
rename to Expes/Analyses/CompDetectors/perf_iut_entrance.txt
diff --git a/Article/Expe_perf/perf_iut_ig1.txt b/Expes/Analyses/CompDetectors/perf_iut_ig1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_ig1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_ig1.txt
diff --git a/Article/Expe_perf/perf_iut_ig2.txt b/Expes/Analyses/CompDetectors/perf_iut_ig2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_ig2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_ig2.txt
diff --git a/Article/Expe_perf/perf_iut_labo1.txt b/Expes/Analyses/CompDetectors/perf_iut_labo1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_labo1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_labo1.txt
diff --git a/Article/Expe_perf/perf_iut_labo2.txt b/Expes/Analyses/CompDetectors/perf_iut_labo2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_labo2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_labo2.txt
diff --git a/Article/Expe_perf/perf_iut_library1.txt b/Expes/Analyses/CompDetectors/perf_iut_library1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_library1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_library1.txt
diff --git a/Article/Expe_perf/perf_iut_mul1.txt b/Expes/Analyses/CompDetectors/perf_iut_mul1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_mul1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_mul1.txt
diff --git a/Article/Expe_perf/perf_iut_mul2.txt b/Expes/Analyses/CompDetectors/perf_iut_mul2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_mul2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_mul2.txt
diff --git a/Article/Expe_perf/perf_iut_office1.txt b/Expes/Analyses/CompDetectors/perf_iut_office1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_office1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_office1.txt
diff --git a/Article/Expe_perf/perf_iut_office2.txt b/Expes/Analyses/CompDetectors/perf_iut_office2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_office2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_office2.txt
diff --git a/Article/Expe_perf/perf_iut_office4.txt b/Expes/Analyses/CompDetectors/perf_iut_office4.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_office4.txt
rename to Expes/Analyses/CompDetectors/perf_iut_office4.txt
diff --git a/Article/Expe_perf/perf_iut_outdoor1.txt b/Expes/Analyses/CompDetectors/perf_iut_outdoor1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_outdoor1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_outdoor1.txt
diff --git a/Article/Expe_perf/perf_iut_outdoor2.txt b/Expes/Analyses/CompDetectors/perf_iut_outdoor2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_outdoor2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_outdoor2.txt
diff --git a/Article/Expe_perf/perf_iut_outdoor3.txt b/Expes/Analyses/CompDetectors/perf_iut_outdoor3.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_outdoor3.txt
rename to Expes/Analyses/CompDetectors/perf_iut_outdoor3.txt
diff --git a/Article/Expe_perf/perf_iut_scc1.txt b/Expes/Analyses/CompDetectors/perf_iut_scc1.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_scc1.txt
rename to Expes/Analyses/CompDetectors/perf_iut_scc1.txt
diff --git a/Article/Expe_perf/perf_iut_scc2.txt b/Expes/Analyses/CompDetectors/perf_iut_scc2.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_scc2.txt
rename to Expes/Analyses/CompDetectors/perf_iut_scc2.txt
diff --git a/Article/Expe_perf/perf_iut_scc3.txt b/Expes/Analyses/CompDetectors/perf_iut_scc3.txt
similarity index 100%
rename from Article/Expe_perf/perf_iut_scc3.txt
rename to Expes/Analyses/CompDetectors/perf_iut_scc3.txt
diff --git a/Article/Expe_perf/perf_loria_indoor1.txt b/Expes/Analyses/CompDetectors/perf_loria_indoor1.txt
similarity index 100%
rename from Article/Expe_perf/perf_loria_indoor1.txt
rename to Expes/Analyses/CompDetectors/perf_loria_indoor1.txt
diff --git a/Article/Expe_perf/perf_loria_indoor2.txt b/Expes/Analyses/CompDetectors/perf_loria_indoor2.txt
similarity index 100%
rename from Article/Expe_perf/perf_loria_indoor2.txt
rename to Expes/Analyses/CompDetectors/perf_loria_indoor2.txt
diff --git a/Article/Expe_perf/perf_loria_indoor3.txt b/Expes/Analyses/CompDetectors/perf_loria_indoor3.txt
similarity index 100%
rename from Article/Expe_perf/perf_loria_indoor3.txt
rename to Expes/Analyses/CompDetectors/perf_loria_indoor3.txt
diff --git a/Article/Expe_perf/perf_loria_office1.txt b/Expes/Analyses/CompDetectors/perf_loria_office1.txt
similarity index 100%
rename from Article/Expe_perf/perf_loria_office1.txt
rename to Expes/Analyses/CompDetectors/perf_loria_office1.txt
diff --git a/Article/Expe_perf/perf_loria_outdoor1.txt b/Expes/Analyses/CompDetectors/perf_loria_outdoor1.txt
similarity index 100%
rename from Article/Expe_perf/perf_loria_outdoor1.txt
rename to Expes/Analyses/CompDetectors/perf_loria_outdoor1.txt
diff --git a/Article/Expe_perf/perf_vosges_castle1.txt b/Expes/Analyses/CompDetectors/perf_vosges_castle1.txt
similarity index 100%
rename from Article/Expe_perf/perf_vosges_castle1.txt
rename to Expes/Analyses/CompDetectors/perf_vosges_castle1.txt
diff --git a/Article/Expe_perf/perf_vosges_castle2.txt b/Expes/Analyses/CompDetectors/perf_vosges_castle2.txt
similarity index 100%
rename from Article/Expe_perf/perf_vosges_castle2.txt
rename to Expes/Analyses/CompDetectors/perf_vosges_castle2.txt
diff --git a/Expes/Analyses/CompDetectors/readme.txt b/Expes/Analyses/CompDetectors/readme.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c932e8c15dbfe772c0da88426612df9b375e86b5
--- /dev/null
+++ b/Expes/Analyses/CompDetectors/readme.txt
@@ -0,0 +1 @@
+to build compTable.tex
diff --git a/Methode/answerToReview.tex b/Methode/answerToReview.tex
new file mode 100755
index 0000000000000000000000000000000000000000..3654ce1bee39ee9d3ab909548873dbc7798da840
--- /dev/null
+++ b/Methode/answerToReview.tex
@@ -0,0 +1,275 @@
+\documentclass{article}
+\usepackage[latin1]{inputenc}
+\usepackage{a4wide}
+\usepackage{graphicx}
+\usepackage{url}
+\usepackage{psfrag}
+\usepackage{pst-all}
+\usepackage{framed}
+\definecolor{dblue}{rgb}{0.2,0.2,0.6}
+
+
+\title{Answer to the reviews of the paper: \\
+Thick Line Segment Detection with Fast Directional Tracking}
+
+\author{P. Even*, P. Ngo, and B. Kerautret}
+
+\newcommand{\ok}{\newline {\color{blue} \underline{Corrected.}}}
+
+\newenvironment{answer}
+  { \begin{framed}
+    \begingroup\color{blue}%
+    \textbf{Answer:} }
+  {\endgroup\end{framed}}
+
+%\newenvironment{answer}{\\ \color{blue}\noindent\it {\underline{Answer:}}}%
+
+\newenvironment{todo}{\\ \color{red} \noindent\it {\underline{\textbf{TODO:} }}}{}%
+
+\begin{document}
+
+ \maketitle
+
+We would like to thank the editors and reviewers for their work and
+for their constructive comments, questions and suggestions.
+Because the paper has already reached the 10-page limit, and in order to
+avoid removing content that may be valuable for understanding the paper,
+additional data are provided in the GitHub repository referenced in the paper.
+A detailed list of the changes is given below, together with specific
+answers to the raised questions.
+{\color{blue} \bf Our paper is attached to this answer and the proposed
+modified text is highlighted in blue}.
+
+
+
+\section{Reviewer \#4}
+
+\begin{itemize}
+\item {\bf 1. Summary. In 3-5 sentences,
+describe the key ideas and experiments and their significance.}
+\begin{itemize}
+\item A straight line detector for gray-scale images is proposed in this paper.
+The proposed method is an extension of a previous work by the same authors.
+The novelties brought in this version are 1) an adaptive directional scan,
+which allows extending the segment along the appropriate direction, and 2) a
+control of the assigned thickness used in blurred segment recognition,
+where an initially large assigned thickness value is tuned according to the
+increase of the blurred segment thickness observed in subsequent iterations.
+\end{itemize}
+
+\item {\bf 2. Strengths. Consider the significance of key ideas,
+experimental validation, writing quality. Explain clearly why these aspects
+of the paper are valuable.}
+\begin{itemize}
+\item Experiments on synthesized images show that the proposed
+method brings a performance improvement over the older version of the
+method.
+\item In the experiment on the York Urban database, three works from the
+literature are outperformed in terms of two evaluation metrics, i.e. C and L/N.
+\item It is appreciated that the code of the work is published, which
+encourages reproducibility.
+\item The writing quality of the paper is good.
+\end{itemize}
+
+\item {\bf 3. Weaknesses. Consider significance of key ideas, experiments,
+writing quality. Clearly explain why these are weak aspects of the paper,
+e.g. why a specific prior work has already demonstrated the key contributions,
+or why the experiments are insufficient to validate the claims.}
+\begin{itemize}
+\item Although a performance improvement is achieved compared to the previous
+version of the method, it is not compared with the latest state-of-the-art
+methods (e.g. [Almazan, E.J., Tal, R., Qian, Y. and Elder, J.H., 2017.
+MCMLSD: A dynamic programming approach to line segment detection.
+In Proceedings of the IEEE Conference on Computer Vision and Pattern
+Recognition (pp. 2031-2039).]), but mainly with three works from the
+literature, the latest of which was published in 2015. \\
+
+\begin{answer}
+Indeed, we were not aware of this recent reference.
+However, contrary to the other methods, where no parameter had to be set for
+the comparisons, here the setting of the ranking level parameter could largely
+influence the achieved results.
+Moreover, as it is written in Matlab, an effective comparison of time
+performance would require a complete reimplementation in a C-like language,
+with a possible rewriting bias.
+Therefore we add this reference to the state of the art, but we cannot
+take it into account in the experiments.
+\begin{todo}
+Add the publication to the state of the art and a short paragraph in the
+experimental section explaining that a direct comparison is not
+straightforward, and that we only retain the methods closest in terms of
+architecture.
+DETAIL: check whether the proposed comparison protocol can be taken into
+account in the answer.
+\end{todo}
+\end{answer}
+
+Some notes are as follows:
+\begin{itemize}
+\item What is the reason for augmenting the blurred segment thickness by 1/2
+in Eq. 5?
+\begin{answer}
+It comes from a discrete geometry consideration linked to the space of all
+discrete lines that could produce the observed digitization (called the
+pre-image). For instance, if we observe points lying on the same line, the
+measured width is 1 pixel. If we set the assigned thickness to this value,
+then we exclude possible one-pixel steps (the well-known aliasing effect) of
+nearly horizontal lines.
+This value of 1/2 is assumed to bound all the possible expansions of the
+observed line. \\
+The text was changed to clarify the role of this half-pixel margin.
+But of course, we have no space left to discuss all these discrete geometry
+considerations in the paper.
+\begin{todo}
+To be clarified in the text: ... set to the observed thickness augmented by a
+tolerance factor of half a pixel, able to take into account all the possible
+discrete lines whose digitization fits the selected points.
+\end{todo}
+\end{answer}
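+To make the role of this margin more concrete, the rule discussed above can
+be written as follows (we use generic symbols here, $\nu$ for the observed
+blurred segment thickness and $\varepsilon$ for the assigned thickness, which
+may differ from the notation of Eq.~5):
+\begin{equation*}
+  \varepsilon = \nu + \frac{1}{2}.
+\end{equation*}
+For instance, a run of strictly aligned pixels yields $\nu = 1$, hence
+$\varepsilon = 3/2$: the extra half pixel keeps within the admissible range
+the unit steps produced by the digitization of a nearly horizontal real line
+whose pre-image is still compatible with the already selected points, whereas
+$\varepsilon = \nu$ would exclude them.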
+
+\item Could the authors present a couple of examples of synthesized images
+that were used in the experiments and the performance of both versions of
+the method obtained on them?
+\begin{answer}
+An example synthetic image is already available in the aforementioned GitHub
+repository.
+We complete it with detailed views and the respective performance.
+\begin{todo}
+GitHub repository to be completed.
+\end{todo}
+\end{answer}
+
+\item What is understood from the paper is that the performance results
+presented in Tables 1 and 2 are obtained using the unsupervised mode. Is this
+correct? Then, why was the performance obtained with the supervised mode not
+also presented, although Section 3.5 is devoted to its explanation?
+\begin{answer}
+We confirm that the performance results are obtained using the unsupervised
+mode.
+Actually, the supervised algorithm relies on the unsupervised multi-detection
+algorithm, and thus features all the method improvements.
+Interactive aspects were discussed at greater length in the former paper, on
+a visual validation basis, in the absence of available ground truth directly
+related to supervised line extraction.
+We believe that such ground truth would closely depend on application
+requirements. \\
+Thus we rather concentrate here on the automatic detection novelties,
+assuming that the performance achieved on all segments is also
+valid for one or several manually extracted segments.
+\begin{todo}
+Check that nothing specific is presented in the supervised part; otherwise,
+maybe something can be removed there to save space.
+\end{todo}
+\end{answer}
+
+\item It is not clearly mentioned and justified why the initial thickness
+values of 7 and 3 are used in the experiments of Tables 1 and 2, respectively.
+Since one of the main claimed advantages of this work over the previous
+version is that it does not use a fixed thickness input value,
+the justification for the selection of these initial parameter values should
+be presented, and the performance related to this selection should also be
+presented and discussed.
+\begin{answer}
+The experiments on synthetic images aim at assessing the performance
+improvement between the former and the current version of the detector,
+including the thickness value estimation. We test here a thickness ranging
+from 2 up to 5 pixels.
+Therefore the initial assigned thickness is set to a greater value: 7 pixels. \\
+For the comparisons with other detectors, we restrict this thickness to a
+value more suited to what the other detectors can provide, as they are not
+designed to process too scattered lines. A value of 3 pixels seems appropriate.
+\begin{todo}
+For the tests on synthetic images, add:
+... to detect all the lines in the defined thickness range in unsupervised
+mode. \\
+For the comparison with the other detectors, add:
+... is set to 3 pixels, as the other detectors are designed to find thin lines.
+\end{todo}
+\end{answer}
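+As a short summary of the rule we follow (again with generic symbols,
+$\varepsilon_0$ for the initial assigned thickness and $\nu_{\max}$ for the
+largest line thickness to be detected, which are not the paper's notations):
+\begin{equation*}
+  \varepsilon_0 > \nu_{\max},
+  \qquad \mbox{e.g. } \varepsilon_0 = 7 > \nu_{\max} = 5
+  \mbox{ for the synthetic images,}
+\end{equation*}
+while for the comparison with detectors designed for thin lines, a tighter
+value $\varepsilon_0 = 3$ is sufficient.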
+
+\item I would like to see the performance of the previous version of the
+method [11] on the York Urban dataset, in Table 2 and Fig. 5.
+\begin{answer}
+Unfortunately, we have no space left to extend Fig. 5, and the results of
+both versions are not easy to distinguish visually.
+So we add all this information to the GitHub repository, together with the
+completed table.
+\begin{todo}
+From memory, I think we get nothing significant, or even worse, better values
+than with the new version because the segments are then less fragmented.
+I will check...
+\end{todo}
+\end{answer}
+
+\item I would suggest that the authors give the reference numbers for the
+methods in Table 2. If these numbers were obtained by the authors
+rather than taken from the LSD, ED-Lines and CannyLines papers, then it may be
+better to refer to the methods by citing those papers in the caption of
+Table 2.
+\begin{answer}
+Thank you for this relevant suggestion. The caption has been completed.
+\begin{todo}
+\end{todo}
+\end{answer}
+\end{itemize}
+\end{itemize}
+
+\item {\bf 4. Paper rating}
+\begin{itemize}
+\item Borderline
+\end{itemize}
+
+\item {\bf 5. Justification of rating.
+What are the most important factors in your rating? }
+\begin{itemize}
+\item A contribution of this work is that it brings a performance improvement
+compared to the previously published version of the method.
+However, it is still not clear how close the proposed method is to the
+state-of-the-art performance reported in recent papers.
+\begin{answer}
+We would just like to emphasize that our method additionally provides a
+measure of the line thickness without degrading other performance figures
+with respect to other recent detectors.
+\end{answer}
+\end{itemize}
+\end{itemize}
+
+
+\section{Reviewer \#5}
+
+\begin{itemize}
+\item {\bf 1. Summary. In 3-5 sentences,
+describe the key ideas and experiments and their significance.}
+\begin{itemize}
+\item This paper considers the problem of detecting straight lines
+in grey-scale images, which is an essential task in several computer vision
+applications. The proposed approach is based on adaptive directional scans
+and a control of the assigned thickness, and it is evaluated on both
+synthetic and real scenarios.
+\end{itemize}
+
+\item {\bf 2. Strengths. Consider the significance of key ideas,
+experimental validation, writing quality. Explain clearly why these aspects
+of the paper are valuable.}
+\begin{itemize}
+\item Experimental results report good performance.
+The authors provide both the code and an online demo.
+\end{itemize}
+
+\item {\bf 3. Weaknesses. Consider significance of key ideas, experiments,
+writing quality. Clearly explain why these are weak aspects of the paper,
+e.g. why a specific prior work has already demonstrated the key contributions,
+or why the experiments are insufficient to validate the claims.}
+\begin{itemize}
+\item As acknowledged by the authors, the method is sensitive to the initial
+conditions.
+\end{itemize}
+
+\item {\bf 4. Paper rating}
+\begin{itemize}
+\item Accept
+\end{itemize}
+
+\item {\bf 5. Justification of rating.
+What are the most important factors in your rating? }
+\begin{itemize}
+\item Experiments clearly demonstrate that this paper is a valid contribution.
+\end{itemize}
+\end{itemize}
+
+\end{document}