comparison text_to_wordmatrix.R @ 0:69714f06f18b draft default tip
"planemo upload for repository https://github.com/galaxyproject/tools-iuc/tools/simtext commit 63a5e13cf89cdd209d20749c582ec5b8dde4e208"
author | iuc
date | Wed, 24 Mar 2021 08:33:56 +0000
#!/usr/bin/env Rscript
# tool: text_to_wordmatrix
#
#The tool extracts the most frequent words per entity (per row). Text in columns starting with "ABSTRACT" or "TEXT" is considered.
#All extracted terms are used to generate a word matrix with rows = entities and columns = extracted words.
#The resulting matrix is binary, with 0 = word not present in the abstracts of the entity and 1 = word present in the abstracts of the entity.
#
#Input: Output of "pubmed_by_queries" or "abstracts_by_pmids", or a tab-delimited table with entities in a column called "ID_<name>",
#e.g. "ID_genes", and text in columns starting with "ABSTRACT" or "TEXT".
#
#Output: Binary matrix with rows = entities and columns = extracted words.
#
#usage: text_to_wordmatrix.R [-h] [-i INPUT] [-o OUTPUT] [-n NUMBER] [-r] [-l] [-w] [-s] [-p] [--install_packages]
#
# optional arguments:
# -h, --help                  show help message
# -i INPUT, --input INPUT     input file name. add path if file is not in working directory
# -o OUTPUT, --output OUTPUT  output file name. [default "text_to_wordmatrix_output"]
# -n NUMBER, --number NUMBER  number of most frequent words that should be extracted [default "50"]
# -r, --remove_num            remove any numbers in text
# -l, --lower_case            by default all characters are translated to lower case. otherwise use -l
# -w, --remove_stopwords      by default a set of English stopwords (e.g., "the" or "not") are removed. otherwise use -w
# -s, --stemDoc               apply Porter's stemming algorithm: collapsing words to a common root to aid comparison of vocabulary
# -p, --plurals               by default words in plural and singular are merged to the singular form. otherwise use -p
# --install_packages          install missing required packages automatically
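#
# Example call (input/output file names are illustrative only, not part of the tool):
# Rscript text_to_wordmatrix.R -i pubmed_by_queries_output.tab -o wordmatrix.tab -n 100 -r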

if ("--install_packages" %in% commandArgs()) {
    print("Installing packages")
    if (!require("argparse")) install.packages("argparse", repos = "http://cran.rstudio.com/")
    if (!require("PubMedWordcloud")) install.packages("PubMedWordcloud", repos = "http://cran.rstudio.com/")
    if (!require("SnowballC")) install.packages("SnowballC", repos = "http://cran.rstudio.com/")
    if (!require("textclean")) install.packages("textclean", repos = "http://cran.rstudio.com/")
    if (!require("SemNetCleaner")) install.packages("SemNetCleaner", repos = "http://cran.rstudio.com/")
    if (!require("stringi")) install.packages("stringi", repos = "http://cran.rstudio.com/")
    if (!require("stringr")) install.packages("stringr", repos = "http://cran.rstudio.com/")
}

suppressPackageStartupMessages(library("argparse"))
suppressPackageStartupMessages(library("PubMedWordcloud"))
suppressPackageStartupMessages(library("SnowballC"))
suppressPackageStartupMessages(library("SemNetCleaner"))
suppressPackageStartupMessages(library("textclean"))
suppressPackageStartupMessages(library("stringi"))
suppressPackageStartupMessages(library("stringr"))

parser <- ArgumentParser()
parser$add_argument("-i", "--input",
                    help = "input file name. add path if file is not in working directory")
parser$add_argument("-o", "--output", default = "text_to_wordmatrix_output",
                    help = "output file name. [default \"%(default)s\"]")
parser$add_argument("-n", "--number", type = "integer", default = 50, choices = seq(1, 500), metavar = "{1..500}",
                    help = "number of most frequent words used per ID in word matrix [default \"%(default)s\"]")
parser$add_argument("-r", "--remove_num", action = "store_true", default = FALSE,
                    help = "remove any numbers in text")
parser$add_argument("-l", "--lower_case", action = "store_false", default = TRUE,
                    help = "by default all characters are translated to lower case. otherwise use -l")
parser$add_argument("-w", "--remove_stopwords", action = "store_false", default = TRUE,
                    help = "by default a set of English stopwords (e.g., 'the' or 'not') are removed. otherwise use -w")
parser$add_argument("-s", "--stemDoc", action = "store_true", default = FALSE,
                    help = "apply Porter's stemming algorithm: collapsing words to a common root to aid comparison of vocabulary")
parser$add_argument("-p", "--plurals", action = "store_false", default = TRUE,
                    help = "by default words in plural and singular are merged to the singular form. otherwise use -p")
parser$add_argument("--install_packages", action = "store_true", default = FALSE,
                    help = "install missing required packages automatically")

args <- parser$parse_args()

data <- read.delim(args$input, stringsAsFactors = FALSE, header = TRUE, sep = "\t")
word_matrix <- data.frame()

text_cols_index <- grep("ABSTRACT|TEXT", names(data))
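
# For each entity (row), clean its ABSTRACT/TEXT fields and keep its most frequent words.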
for (row in seq(nrow(data))) {
    # clean the row's abstracts/text and count word frequencies (data frame with 'word' and 'freq' columns)
    top_words <- cleanAbstracts(abstracts = data[row, text_cols_index],
                                rmNum = args$remove_num,
                                tolw = args$lower_case,
                                rmWords = args$remove_stopwords,
                                stemDoc = args$stemDoc)

    top_words$word <- as.character(top_words$word)

    cat("Most frequent words for row", row, "are extracted.\n")

    # merge plural and singular forms of the same word and sum their frequencies
    if (args$plurals == TRUE) {
        top_words$word <- sapply(top_words$word, function(x) {
            singularize(x)
        })
        top_words <- aggregate(freq ~ word, top_words, sum)
    }

    top_words <- top_words[order(top_words$freq, decreasing = TRUE), ]
    top_words$word <- as.character(top_words$word)

    # keep at most args$number words and store their frequencies in the row's columns
    number_extract <- min(args$number, nrow(top_words))
    word_matrix[row, top_words$word[1:number_extract]] <- top_words$freq[1:number_extract]
}
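
# At this point word_matrix holds raw word frequencies per row, with NA for words that
# were not among a given row's most frequent words; the steps below turn it into a binary matrix.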

word_matrix <- as.matrix(word_matrix)
word_matrix[is.na(word_matrix)] <- 0
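# e.g. a row of raw counts c(4, 0, 1) becomes c(1, 0, 1) in the binarization step below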
word_matrix <- (word_matrix > 0) * 1 #binary matrix

cat("A matrix with", nrow(word_matrix), "rows and", ncol(word_matrix), "columns is generated.\n")

write.table(word_matrix, args$output, row.names = FALSE, sep = "\t", quote = FALSE)