Commit b272413d authored by Jacques Fize's avatar Jacques Fize

DEBUG

parent 5ea67e5e
@@ -302,13 +302,11 @@ X_1_train = np.array(X_1_train)
 X_2_train = np.array(X_2_train)
 y_lat_train = np.array(y_lat_train)
 y_lon_train = np.array(y_lon_train)
-y_train = np.array(y_train)
 X_1_test = np.array(X_1_test)
 X_2_test = np.array(X_2_test)
 y_lat_test = np.array(y_lat_test)
 y_lon_test = np.array(y_lon_test)
-y_test = np.array(y_test)
 logging.info("Data prepared !")
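Note on this hunk: the combined y_train / y_test targets are dropped and only the separate latitude/longitude arrays are kept. A minimal sketch of what the surrounding data preparation then looks like, with toy data standing in for the real pipeline (this is an illustration, not the repository's actual code):

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)

# Hypothetical toy samples: two n-gram index sequences per toponym pair,
# plus separate latitude/longitude targets (no combined y_* arrays anymore).
pairs = [([1, 4, 2], [3, 5, 9], 43.6, 3.9),
         ([7, 2, 8], [1, 1, 6], 48.8, 2.3)]

X_1_train, X_2_train, y_lat_train, y_lon_train = [], [], [], []
for x1, x2, lat, lon in pairs:
    X_1_train.append(x1)
    X_2_train.append(x2)
    y_lat_train.append(lat)
    y_lon_train.append(lon)

# Convert the Python lists to numpy arrays, as in the hunk above.
X_1_train = np.array(X_1_train)
X_2_train = np.array(X_2_train)
y_lat_train = np.array(y_lat_train)
y_lon_train = np.array(y_lon_train)

logging.info("Data prepared !")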
@@ -71,7 +71,7 @@ logging.basicConfig(
     level=logging.INFO
 )
-args = ConfigurationReader("./parser_config/toponym_combination_embedding.json")\
+args = ConfigurationReader("./parser_config/toponym_combination_embedding_v2.json")\
     .parse_args()#("-w --wikipedia-cooc-fn subsetCoocALL.csv ../data/geonamesData/allCountries.txt ../data/geonamesData/hierarchy.txt".split())
 #
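Usage note, assuming only what this hunk shows (a ConfigurationReader built from the JSON spec path, with parse_args optionally accepting an argv-style list): the commented-out example can be exercised directly. The import path and attribute names below are assumptions made for illustration, not taken from the repository.

from lib.utils import ConfigurationReader  # import path is an assumption

# Parse an explicit argument string instead of sys.argv, mirroring the
# commented-out example: the -w flag, the Wikipedia co-occurrence file,
# and the two positional Geonames inputs.
args = ConfigurationReader("./parser_config/toponym_combination_embedding_v2.json")\
    .parse_args("-w --wikipedia-cooc-fn subsetCoocALL.csv ../data/geonamesData/allCountries.txt ../data/geonamesData/hierarchy.txt".split())

# Attribute names below assume argparse-style dest conversion (dashes to underscores).
print(args.wikipedia_cooc, args.wikipedia_cooc_fn)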
@@ -12,7 +12,7 @@
 {"long": "--adjacency-iteration", "type":"int","default":1},
 { "short": "-n", "long": "--ngram-size", "type": "int", "default": 2 },
 { "long": "--ngram-word2vec-iter", "type": "int", "default": 50 },
-{ "short": "-t", "long": "--tolerance-value", "type": "float", "default": 100 },
+{ "short": "-t", "long": "--tolerance-value", "type": "float", "default": 0.002 },
 { "short": "-e", "long": "--epochs", "type": "int", "default": 100 },
 { "short": "-d", "long": "--dimension", "type": "int", "default": 256 },
 { "long": "--admin_code_1", "default": "None" }
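The --tolerance-value default dropping from 100 to 0.002 reads as a small improvement threshold rather than a distance in kilometres. A hedged sketch of one common way such a value is wired in, using Keras' standard EarlyStopping callback; this is an illustration of that interpretation, not the project's confirmed usage:

import tensorflow as tf

tolerance_value = 0.002  # e.g. args.tolerance_value from the parsed config

# Stop training once the monitored loss improves by less than the tolerance
# for several consecutive epochs; one plausible use of a 0.002 default.
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=tolerance_value,
    patience=5,
    restore_best_weights=True,
)

# model.fit(..., epochs=100, callbacks=[early_stopping])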
@@ -2,21 +2,19 @@
 "description": "Toponym Combination",
 "args": [
 { "short": "geoname_input", "help": "Filepath of the Geonames file you want to use." },
-{ "short": "ngram_index_fn", "help": "Filepath of the NgramIndex file you want to use." },
-{ "short": "embedding_fn", "help": "Filepath of the Embedding file you want to use." },
-{ "short": "-n", "long": "--ngram-size", "type": "int", "default": 4 },
-{ "short": "-d", "long": "--dimension", "type": "int", "default": 100 },
+{ "short": "geoname_hierachy_input", "help": "Filepath of the Geonames file you want to use." },
 { "short": "-v", "long": "--verbose", "action": "store_true" },
 { "short": "-i", "long": "--inclusion", "action": "store_true" },
 { "short": "-a", "long": "--adjacency", "action": "store_true" },
 { "short": "-w", "long": "--wikipedia-cooc", "action": "store_true" },
-{ "long": "--inclusion-fn","help":"Cooccurrence data filename"},
 { "long": "--wikipedia-cooc-fn","help":"Cooccurrence data filename"},
-{ "long": "--adjacency-fn","help":"Adjacency data filename"},
-{ "long": "--cooc-sample", "type": "int", "default": 3 },
-{"long": "--adjacency-sample", "type":"int","default":1},
+{ "long": "--cooc-sample-size", "type": "int", "default": 1 },
+{"long": "--adjacency-iteration", "type":"int","default":1},
+{ "short": "-n", "long": "--ngram-size", "type": "int", "default": 2 },
+{ "long": "--ngram-word2vec-iter", "type": "int", "default": 50 },
+{ "short": "-t", "long": "--tolerance-value", "type": "float", "default": 100 },
 { "short": "-e", "long": "--epochs", "type": "int", "default": 100 },
-{ "short": "-b", "long": "--batch-size", "type": "int", "default": 100 },
-{ "short": "-k", "long": "--k-value", "type": "float", "default": 100 ,"help":"Used for the accuracy@k metrics. Given in kilometers"}
+{ "short": "-d", "long": "--dimension", "type": "int", "default": 256 },
+{ "long": "--admin_code_1", "default": "None" }
 ]
 }
\ No newline at end of file
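For context on this spec format: each entry carries the fields argparse expects (positional short names, --long options, type, default, store_true actions). A hypothetical translation of a few of the new entries into plain argparse, purely to illustrate the semantics; this is not the ConfigurationReader implementation:

import argparse

parser = argparse.ArgumentParser(description="Toponym Combination")

# Positional inputs (entries with only a "short" name and no leading dash).
parser.add_argument("geoname_input", help="Filepath of the Geonames file you want to use.")
parser.add_argument("geoname_hierachy_input", help="Filepath of the Geonames file you want to use.")

# Boolean flags ("action": "store_true").
parser.add_argument("-w", "--wikipedia-cooc", action="store_true")

# Typed options with defaults taken from the spec above.
parser.add_argument("-n", "--ngram-size", type=int, default=2)
parser.add_argument("-t", "--tolerance-value", type=float, default=100)
parser.add_argument("-e", "--epochs", type=int, default=100)
parser.add_argument("-d", "--dimension", type=int, default=256)

# Example invocation with the two positional files plus a flag override.
args = parser.parse_args(["allCountries.txt", "hierarchy.txt", "-w", "-e", "50"])
print(args.epochs, args.wikipedia_cooc)  # 50 True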