Abstract
This process creates aggregations based on the previously stratified data for qualifications. It includes statistics based on the responses given to the qualification-related fields on the Europass survey.
# http://rmarkdown.rstudio.com/html_document_format.htm
sourceTimeNeeded <- c(0);
source.starting.time <- proc.time()[3]

Information about the libraries, environment, sources used and their execution is reported. Additional information is provided within section tabs. Navigating through the report is also possible through the table of contents. Tables reported can be dynamically filtered, searched, ordered and exported into various formats.
librariesVersion <- c()
for(i in 1:length(libraries))
    librariesVersion <- c(librariesVersion, paste(packageVersion(libraries[i] )))
librariesLoaded <- lapply(libraries, require, character.only = TRUE)
## Loading required package: dplyr
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:data.table':
## 
##     between, first, last
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
## Loading required package: DT
## Loading required package: stringr
## Loading required package: magrittr
## Loading required package: text2vec
## Loading required package: stopwords
## Loading required package: cld2
## Loading required package: cld3
## 
## Attaching package: 'cld3'
## The following objects are masked from 'package:cld2':
## 
##     detect_language, detect_language_mixed
## Loading required package: parallel
../000.core/00.01.libraries.R completed in 2.05 seconds
sourceTimeNeeded <- c( sourceTimeNeeded, timeNeeded)
source.starting.time <- proc.time()[3]
## Base functions
# ESCO skills
# @authors ds@eworx.gr
repository <- "/data/generic/"
getSourcePath <- function(filename, baseFolder = repository){
    return(paste(baseFolder, filename, sep = ""))
}
readData <- function(filename, colClasses = c(), baseFolder = repository, header = TRUE, sep = "\t",
                     encoding = "UTF-8", stringsAsFactors = TRUE, na.strings = c("", "NULL"), verbose = FALSE){
    if(length(colClasses) == 0)
        return(data.table::fread(input = getSourcePath(filename, baseFolder), header = header, sep = sep,
            encoding = encoding, stringsAsFactors = stringsAsFactors, verbose = verbose, showProgress = TRUE,
            na.strings = na.strings))
    return(data.table::fread(input = getSourcePath(filename, baseFolder), colClasses = colClasses, header = header,
        sep = sep, encoding = encoding, verbose = verbose, showProgress = TRUE, na.strings = na.strings))
}
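As a usage sketch (the file path below is hypothetical and assumed to exist under the repository root):

# Read a tab-separated file from /data/generic/examples/demo.tsv (hypothetical path)
demo <- readData("examples/demo.tsv")
# Passing explicit column classes skips fread's type inference
demo <- readData("examples/demo.tsv", colClasses = c("character", "integer"))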
#rds for small disk space & fst for fast load
saveBinary <- function(data, filename, baseFolder = repository, format = "rds"){
  fileName <- getSourcePath(filename, baseFolder)
  dir.create(dirname(fileName), recursive = TRUE, showWarnings = FALSE)
  if(format == "rds") saveRDS(data, fileName)
  if(format == "fst") fst::write_fst(data, fileName)
}
#rds for small disk space & fst for fast load
loadBinary <- function(filename, baseFolder = repository, format = "rds", as.data.table = TRUE){
  if(format == "rds"){return(readRDS(getSourcePath(filename, baseFolder)))}
  if(format == "fst"){return(fst::read_fst(getSourcePath(filename, baseFolder), as.data.table = as.data.table))}
}
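A minimal round-trip sketch, assuming the repository folder is writable and the fst package is installed:

# Hypothetical example: save a small table as fst, then load it back
dt <- data.table::data.table(id = 1:3, value = c("a", "b", "c"))
saveBinary(dt, "examples/demo.fst", format = "fst")
dt2 <- loadBinary("examples/demo.fst", format = "fst")
all.equal(dt, dt2) # TRUE: the table survives the round trip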
rowColumns <- function(data){
    return(paste(format(nrow(data), big.mark = ","), "Rows X", ncol(data), "Columns"))
}
publishIncludeCss <- function(){
    sourceFile <- "/data/jobs/wp41.analysis/000.core/include.css"
    destinationFile <- "/data/tmpfs/results/include.css"
    if (!file.exists(destinationFile)) {
        return(file.copy(sourceFile, destinationFile))
    }else{
        return(TRUE)
    }
}
# As the mounted storage is in memory, make sure the asset include.css is there.
summariseTable <- function(data){
    return(data.frame(unclass(summary(data)), check.names = FALSE, stringsAsFactors = FALSE))
    #return(do.call(cbind, lapply(data, summary)))
}
factoriseCharacterColumns <- function(data){
    for(name in names(data)){
        if(is.character(data[[name]])){
            data[[name]] <- as.factor(data[[name]])
        }
    }
    return(data) 
}
codeBook <- function(dataset){
  out <- lapply(names(dataset), function(var_name) {
    knitr::knit_expand(text = readLines("../000.core/codeBook.template"))
  })
  
  cat(
    knitr::knit(
      text = unlist(paste(out, collapse = '\n')), 
      quiet = TRUE)
    )
}
fwrite_zip <- function(data, filename, quote = TRUE){
  dir.create(dirname(filename), recursive = TRUE, showWarnings = FALSE)
  filename_csv <- strsplit(filename, "/") %>% unlist %>% tail(1)
  filename_csv <- gsub(".zip", ".csv", filename_csv)
  fwrite(data, filename_csv, quote = quote)
  if(file.exists(filename)) unlink(filename)
  zip(filename, filename_csv)
  unlink(filename_csv)
}
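A hedged usage sketch; note that the intermediate csv is written to the current working directory before being zipped, and that zip() shells out to a system zip utility, which is assumed to be available:

dt <- data.table::data.table(x = 1:5, y = letters[1:5])
fwrite_zip(dt, "/tmp/exports/demo.zip") # hypothetical destination; the archive contains demo.csv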
############################
# https://rstudio.github.io/DT/010-style.html
#https://rpubs.com/marschmi/RMarkdown
capitalise <- function(x) paste0(toupper(substring(x, 1, 1)), substring(x, 2, nchar(x)))
styliseDTNumericalColumn <- function(data, result, columnName, color, columnsName_original ){
    if(columnName %in% columnsName_original){
        result <- result %>%   formatStyle(
            columnName,
            background = styleColorBar(data[[columnName]], color),
            backgroundSize = '100% 90%',
            backgroundRepeat = 'no-repeat',
            backgroundPosition = 'center'
        )
    }
    return(result)
}
reportTabularData <- function(data, anonymize=TRUE){
  if(anonymize) return() # when anonymisation is requested, render nothing
    
    columnsName <- names(data)
    columnsName <- lapply(columnsName, capitalise)
    columnsName_original <- names(data)
    result <-
        DT::datatable(
            data,
            class = 'cell-border stripe',
            filter = 'top',
            rownames = FALSE,
            colnames = columnsName,
            extensions = 'Buttons',
            options = list(
                pageLength = 20,
                columnDefs = list(list(className = 'dt-left', targets = "_all")),
                dom = 'Bfrtip',
                buttons = c('copy', 'csv', 'excel', 'pdf'),
                searchHighlight = TRUE,
                initComplete = JS(
                    "function(settings, json) {",
                        "$(this.api().table().header()).css({'border': '1px solid'});",
                    "}"
                )
            )
        )
    result <- styliseDTNumericalColumn(data,result, "Count", 'steelblue', columnsName_original)
    result <- styliseDTNumericalColumn(data,result, "sourceTimeNeeded", '#808080', columnsName_original)
    result <- styliseDTNumericalColumn(data,result, "timeNeeded", '#808080', columnsName_original)
    #result <- styliseDTNumericalColumn(data,result, "percentMatch", '#5fba7d', columnsName_original)
    result <- styliseDTNumericalColumn(data,result, "percentMatch", '#4682b4', columnsName_original)
 
 
    return(result)
}
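Note the anonymize guard: with the default anonymize = TRUE the function returns without rendering anything, so a table only appears when the flag is explicitly disabled. A minimal sketch with made-up data:

counts <- data.frame(category = c("A", "B"), Count = c(120, 80)) # hypothetical aggregate
reportTabularData(counts, anonymize = FALSE) # renders a filterable, exportable DT widget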
fonts <- list(
  sans = "DejaVu Serif",
  mono = "DejaVu Serif",
  `Times New Roman` = "DejaVu Serif"
)
#read_xml_to_list <- function(filepath, is.gz = FALSE){
#   if(is.gz){  
#       temp_data <- paste0(repository, "data/delete.me")
#       result <- xmlToList(xmlParse(gunzip(filepath, destname = temp_data, remove =FALSE)))
#       Sys.chmod(file.path(temp_data), "777", use_umask = FALSE)
#       unlink(temp_data)
#       result
#   }else{
#       xmlToList(xmlParse(filepath))
#   }
#}
#transpose_list_to_dt <- function(data_list){
#  dt <- t(as.data.table(data_list))
#  dt <- as.data.table(dt)
#  dt[, (names(dt)) := lapply(.SD, unlist), .SDcols = 1:ncol(dt)]
#  dt[, (names(dt)) := lapply(.SD, unlist), .SDcols = 1:ncol(dt)]
#  names(dt) <- names(data_list[[1]])
#  dt
#}
cleansingCorpus <- function(
    htmlString, rem.html =TRUE, rem.http = TRUE, rem.newline = TRUE,
    rem.nonalphanum = TRUE, rem.longwords = TRUE, rem.space = TRUE, 
    tolower = TRUE, add.space.to.numbers = TRUE, rem.country.begin = FALSE,
    rem.nonalphanum.begin = FALSE, rem.space.begin = FALSE
){
  if(rem.html){text <- gsub("<.*?>", " ", htmlString)} # removing html tags
  if(rem.http){text <- gsub(" ?(f|ht)tp(s?)://(.*)[.][a-z]+", " ", text)} # removing http destinations
  if(rem.newline){text <- gsub("[\r\n\t]", " ", text)} # removing newlines and tabs
  if(rem.nonalphanum){text <- gsub("[^[:alpha:]]", " ", text)} # removing non-alphabetic characters
  if(rem.longwords){text <- gsub("\\w{35,}", " ", text)} # removing words of 35 or more characters
  if(rem.space){text <- gsub("\\s+", " ", text)} # removing excess space
  if(tolower){text <- tolower(text)}
  if(add.space.to.numbers){ # add space between numbers and letters
    text <- gsub("([0-9])([[:alpha:]])", "\\1 \\2", text)
    text <- gsub("([[:alpha:]]|[.])([0-9])", "\\1 \\2", text)
  }
  if(rem.space.begin){text <- gsub("^[[:space:]]*", "", text)}
  if(rem.country.begin){text <- gsub("^EU", "", text)} # remove country codes from the beginning of the text
  if(rem.nonalphanum.begin){text <- gsub("^[?–-]*", "", text)} # remove special characters found at the beginning of the text
  if(rem.space.begin){text <- gsub("^[[:space:]]*", "", text)} # trim again, after the removals above
  trimws(text)
}
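For illustration, a hypothetical snippet run through the default pipeline; note that rem.nonalphanum also strips digits, so numbers vanish before add.space.to.numbers can act:

cleansingCorpus("<li>Project management</li>\n<li>Data analysis 2.0</li>")
## [1] "project management data analysis"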
cleansingEducationCorpus <- function(text) {
  text <- gsub("\\.", "", text) #removing periods
  text <- gsub("[[:punct:]]", " ", text) #removing other punctuation
  text <- gsub("\\s+", " ", text) #removing excess space
  text <- tolower(text) #changing case to lower
  # normalising Greek: replacing final sigma and removing vowel accents
  text <- gsub("ς", "σ", text)
  text <- gsub("ά", "α", text)
  text <- gsub("έ", "ε", text)
  text <- gsub("ή", "η", text)
  text <- gsub("ί", "ι", text)
  text <- gsub("ύ", "υ", text)
  text <- gsub("ό", "ο", text)
  text <- gsub("ώ", "ω", text)
  trimws(text) #trimming white-space
}
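For example, assuming a UTF-8 locale so that tolower also lowercases the Greek capitals:

cleansingEducationCorpus("Πανεπιστήμιο Αθηνών!")
## [1] "πανεπιστημιο αθηνων"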
#This function removes dates that are "relics" from the xml parsing
removeDates <- function(text){
  days <-  "(Sunday,|Monday,|Tuesday,|Wednesday,|Thursday,|Friday,|Saturday,)"
  months <- "(January|February|March|April|May|June|July|August|September|October|November|December|Months)"
  date_form1 <- paste(days, months, "([0-9]|[0-9][0-9]), [0-9][0-9][0-9][0-9]")
  date_form2 <- "\\?[0-9][0-9][0-9][0-9]"
  text <- gsub(date_form1, " ", text)
  gsub(date_form2, " ", text)
}
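A quick sketch of both patterns on a made-up string (residual whitespace is left for a later cleansing step):

removeDates("Updated Monday, January 5, 2021 budget ?2021")
# both date forms are blanked out, leaving "Updated" and "budget" plus residual spaces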
xmlToDataTable <- function(xmlData, itemNames){
  itemList <- lapply(itemNames,
    function(x){
      xml_text(xml_find_all(xmlData, paste0(".//item/", x)))
    }
  )
  names(itemList) <- itemNames
  as.data.table(itemList)
}
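A small sketch with inline XML, relying on the xml2 functions (read_xml, xml_find_all, xml_text) the function assumes are loaded:

xmlData <- xml2::read_xml("<root><item><title>A</title></item><item><title>B</title></item></root>")
xmlToDataTable(xmlData, c("title"))
# a one-column data.table: title = c("A", "B")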
cleanCorpusHtml <- function(text){
  unlist(lapply(text, function(x){
    if(nchar(x) > 0){
        # Nodes were starting with tag keywords inside <li> elements, so the list items are
        # relocated to the end; the information remains and the description starts with the main content.
        html <- gsub(">", "> ", x) # add spaces after html tags so their contents are not concatenated
        xml <- read_xml(html, as_html = TRUE)
        lis <- xml_find_all(xml, ".//li")
        xml_remove(lis)
        text <- paste(paste(xml_text(xml), collapse = ""), paste(xml_text(lis), collapse = ""), collapse = "")
        gsub("\\s+", " ", text)
    }else {""}
  }))
}
#Split equally a vector into chunks of number n_chunks
equal_split <- function(vct, n_chunks) {
  lim <- length(vct)
  fstep <- lim%/%n_chunks
  idx_list <- list()
  for(i in seq_len(n_chunks - 1)){ # seq_len() handles n_chunks == 1 safely (empty loop)
    idx_list[[i]] <- vct[((i-1)*fstep + 1):(i*(fstep))]
  }
  idx_list[[n_chunks]] <- vct[((n_chunks - 1)*fstep + 1):(lim)]
  return(idx_list)
}
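For example, splitting ten indices into three chunks; the last chunk absorbs the remainder:

chunks <- equal_split(1:10, 3)
# chunks[[1]] = 1:3, chunks[[2]] = 4:6, chunks[[3]] = 7:10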
#Function that takes a vector, and returns thresholded first 10 sorted indexes
getThresholdOrderRwmd <- function(vct, idVec, threshold = 1e-6, numHead = 10){
    vct <- ifelse(vct > threshold, vct, Inf)
    indexVec <- head(order(vct), numHead)
    idVec[indexVec]
}
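A sketch with made-up distances; values at or below the threshold are pushed to the end of the ordering rather than dropped:

dists <- c(0.5, 0, 0.2, 0.9)
getThresholdOrderRwmd(dists, c("a", "b", "c", "d"))
# "c" "a" "d" "b": ascending by distance, with the zero distance treated as Inf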
#Function to read xml nodes in description
maintainElements <- function(nodes, elementType = "a", attribute = "href"){
    xml_attr(xml_find_all(nodes, paste0(".//", elementType)), attribute)
}
#Function to add results to datatable
elementsToDataTable <- function(result, elementType){
    if(length(result) > 0)
        data.table(elementType = elementType, attributeValue = result)
    else
        data.table()
}
#Function to retrieve urls from text
keepHtmlElements <- function(feedItem){
    nodes <- read_xml(paste0("<div>",  feedItem, "</div>"), as_html = TRUE)
    rbind(
        elementsToDataTable(maintainElements(nodes, "a", "href"), "link"),
        elementsToDataTable(maintainElements(nodes, "img", "src"), "image"),
        elementsToDataTable(maintainElements(nodes, "img-src", "src"), "image")
        #All "img-src" are NA
    )
}
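For instance, with xml2 loaded as in the libraries sourced above:

keepHtmlElements('<p>See <a href="https://example.org">the site</a> and <img src="logo.png"/></p>')
# a data.table with rows: link -> https://example.org, image -> logo.png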
#retrieve list of parameters in a http request query
getQueryParams <- function(url){
  query <- httr::parse_url(url)$query
  queryValues <- unlist(query)
  queryNames <- names(query)
  dat <- data.table(varName = queryNames, value = queryValues)
  dat[queryValues != ""]
}
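For example (httr is assumed available; parameters with empty values are dropped):

getQueryParams("https://example.org/search?q=skills&lang=en&empty=")
# a data.table with varName = c("q", "lang") and value = c("skills", "en")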
keepCountryName <- function(string){
  string <- gsub(".*_", "", string)
  gsub("\\..*", "", string)
}
keepNTokens <- function(string, num){
    tokenList <- strsplit(string, split = " ")
    sapply(tokenList, function(tokens){
        tokens <- sort(tokens)
        tokensShift <- shift(tokens, -num, fill = FALSE)
        paste(tokens[tokens != tokensShift], collapse = " ")    
    })
}
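The shift() comparison caps how many times a repeated token survives: after sorting, a token is kept only when it differs from the token num positions ahead. A quick sketch:

keepNTokens("data data data science", 2)
# "data data science": at most two copies of each token remain, returned sorted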
findTFIDF <- function(corpus, stopwords, normalize = "double", min_char = 1) {
  tokensList <- strsplit(corpus[, text], " ")
  names(tokensList) <- corpus[, code]
  tokensDT <- lapply(tokensList, as.data.table) %>%
    rbindlist(idcol = TRUE) %>%
    setnames(c("class", "term"))
  tokensDT <- tokensDT[!term %in% stopwords][nchar(term) > min_char]
  # inverse document frequency, smoothed
  idfDT <- tokensDT[!duplicated(tokensDT)][, .(docFreq = .N), by = "term"]
  idfDT[, idf := log(length(unique(tokensDT$class)) / (docFreq + 1)) + 1]
  # term frequency, with double or logarithmic normalisation
  tfDT <- tokensDT[, .(term_count = .N), by = c("class", "term")]
  if(normalize == "double") tfDT[, tf := 0.5 + 0.5 * term_count / max(term_count)]
  if(normalize == "log") tfDT[, tf := log(1 + term_count)]
  merge(tfDT, idfDT, by = "term")[, tfIdf := tf * idf][, .(term, class, tfIdf)]
}
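A minimal sketch on a two-document corpus shaped as the function expects (columns text and code):

corpus <- data.table(
  code = c("d1", "d2"),
  text = c("data science skills", "communication skills")
)
findTFIDF(corpus, stopwords = "and")
# returns term, class and tfIdf; "skills" occurs in both classes, so its smoothed
# idf (log(2/3) + 1) is lower than that of the class-specific terms (log(2/2) + 1)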
tidyJsonData <- function(jsonList){
    if(length(jsonList) == 0)return(NULL)
    unlistOccupations <- jsonList %>% unlist
    codesMaleFemale <- names(unlistOccupations)
    epasMapping <- data.table(unlistOccupations)
    epasMapping[ , code := gsub("\\.[[:alpha:]]$", "", codesMaleFemale)]
    uniqueMappingBoolean <- epasMapping[ , unlistOccupations != c(unlistOccupations[-1], F), by = code]$V1
    codesEpasDB <- epasMapping[uniqueMappingBoolean]
    names(codesEpasDB) <- c("title", "code")
    codesEpasDB[ , title := cleansingCorpus(title)]
}
findNGrams <- function(corpus, min_n, max_n = min_n, stopWords = NA_character_) {
  ngrams <- itoken(corpus, tokenizer = word_tokenizer, progressbar = FALSE) %>%
    create_vocabulary(stopwords = stopWords, c(min_n, max_n), sep_ngram = " ") %>%
    as.data.table()
  ngrams[order(-doc_count)][, .(term, count = doc_count)]
}
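For instance, collecting unigrams and bigrams from two short strings:

findNGrams(c("data science", "data analysis"), 1, 2)
# a data.table of terms ("data", "data science", "science", ...) ordered by doc_count;
# "data" tops the list, as it appears in both documents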
# Pipe variant that evaluates `lhs %>% rhs` with warnings temporarily silenced
`%W>%` <- function(lhs, rhs){
  w <- options()$warn
  on.exit(options(warn = w))
  options(warn = -1)
  eval.parent(substitute(lhs %>% rhs))
}
getStopwords <- function(locale) { 
  stopwordsLocale <- c(stopwords_getlanguages(source = "misc"), stopwords_getlanguages(source = "snowball")) 
  stopWords <- ""
  if (locale %in% stopwordsLocale) 
    stopWords <- locale %W>% stopwords
  stopWords  
}
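For example (locales covered by neither stopword source fall back to the empty string):

head(getStopwords("en"))
# "i" "me" "my" "myself" "we" "our": snowball stopwords, fetched with warnings silenced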
###################################################################################################
# Text translation
###################################################################################################
translateText <- function(sourceText, sourceLang, translationLang, batchSize = 4800) {
  if (length(sourceText) == 0) {
    return ("")
  } else if (length(sourceText) == 1) {
    return (requestTranslation(sourceText, sourceLang, translationLang))
  } else if (length(sourceText) > 4800) {
    return (NA)
  }
  # wrap each text in "< ... >" markers so the batched translations can be split apart again
  sourceQueries <- gsub("$", "\n >", sourceText)
  sourceQueries <- gsub("^", "< \n", sourceQueries)
  queries <- data.table(query = sourceQueries, size = nchar(sourceQueries), batch = nchar(sourceQueries))
  # accumulate character counts until a batch would exceed batchSize, then start a new batch
  for (row in seq(nrow(queries) - 1)) {
    cumulativeSum <- queries[row, batch] + queries[row + 1, batch] + 3
    queries[row + 1, batch := ifelse(cumulativeSum > batchSize, batch, cumulativeSum)]
  }
  batchStarts <- which(queries[, size] == queries[, batch])
  batchFins <- c(batchStarts[-1] - 1, nrow(queries))
  batches <- lapply(seq_along(batchStarts), function(i) batchStarts[i]:batchFins[i])
  pastedQueries <- lapply(batches, function(batch) paste0(queries[batch, query], collapse = "\n")) %>% unlist()
  translatedText <- lapply(pastedQueries, requestTranslation, sourceLang, translationLang) %>%
    unlist() %>%
    paste0(collapse = " ")
  # strip the markers and split the concatenated result back into one element per input text
  translatedText <- gsub("\\s?<", "", translatedText)
  gsub(">$", "", translatedText) %>%
    space_tokenizer(sep = ">") %>%
    unlist() %>%
    trimws()
}
requestTranslation <- function(sourceText, sourceLang, translationLang) { 
  googleTranslateURL <- paste0(
    "https://translate.google.com/m",
    "?hl=", sourceLang,
    "&sl=", sourceLang,
    "&tl=", translationLang,
    "&ie=UTF-8&prev=_m&q=", URLencode(sourceText, reserved = TRUE)
  )
  GET(googleTranslateURL, add_headers("user-agent" = "Mozilla/5.0")) %>%
    read_html() %>% 
    xml_child(2) %>%
    xml_child(5) %>%
    xml_text() %>%
    unlist()
}
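A usage sketch; this scrapes Google Translate's unofficial mobile endpoint, so it depends on outbound network access and on that page keeping its current HTML layout:

translateText(c("Bonjour", "Bonne journée"), "fr", "en")
# e.g. c("Hello", "Good day"), subject to the endpoint's behaviour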
#############################################################################################################
## R version 4.0.5 (2021-03-31)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 20.04.2 LTS
## 
## Matrix products: default
## BLAS/LAPACK: /usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.8.so
## 
## locale:
##  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
##  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=en_US.UTF-8    
##  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=C             
##  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
## [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
## 
## attached base packages:
## [1] parallel  stats     graphics  grDevices utils     datasets  methods  
## [8] base     
## 
## other attached packages:
##  [1] cld3_1.4.1        cld2_1.2.1        stopwords_2.2     text2vec_0.6     
##  [5] magrittr_2.0.1    stringr_1.4.0     DT_0.18           dplyr_1.0.6      
##  [9] rmarkdown_2.8     data.table_1.14.0
## 
## loaded via a namespace (and not attached):
##  [1] Rcpp_1.0.6           pillar_1.6.0         bslib_0.2.5         
##  [4] compiler_4.0.5       jquerylib_0.1.4      highr_0.9           
##  [7] tools_4.0.5          digest_0.6.27        jsonlite_1.7.2      
## [10] evaluate_0.14        lifecycle_1.0.0      tibble_3.1.1        
## [13] lattice_0.20-41      pkgconfig_2.0.3      rlang_0.4.11        
## [16] Matrix_1.3-2         mlapi_0.1.0          RhpcBLASctl_0.20-137
## [19] yaml_2.2.1           xfun_0.23            knitr_1.33          
## [22] generics_0.1.0       vctrs_0.3.8          sass_0.4.0          
## [25] htmlwidgets_1.5.3    grid_4.0.5           tidyselect_1.1.1    
## [28] glue_1.4.2           R6_2.5.0             fansi_0.4.2         
## [31] lgr_0.4.2            purrr_0.3.4          ellipsis_0.3.2      
## [34] htmltools_0.5.1.1    float_0.2-4          rsparse_0.4.0       
## [37] utf8_1.2.1           stringi_1.6.2        crayon_1.4.1
../000.core/00.02.base.functions.R completed in 0.11 seconds
Type of data: data.table, data.frame.
Dimensions: 353518, 20.
Column Names: id, locale, creationDate, lastUpdate, postalcode, country, gender, birthdate, nationality, work_years, num_work, min_work_years, max_work_years, mean_work_years, is_employed, eqf_level, eqf_previous, is_student, headline_type, headline_isco.
Type of data: data.table, data.frame.
Dimensions: 1766160, 26.
Column Names: index, id, locale, country, nationality, birthYear, age, final_level, institution_name, institution_short, numQual, title, organisation, organisationCountry, from, to, status, fromAge, toAge, studyAge, length, cumLength, eqf_level, eqf_group, edu_level, edu_field.
Type of data: data.table, data.frame.
Dimensions: 1303728, 35.
Column Names: id, URI, from, to, label, employer, code, occupationTitle, iscoCode1, iscoCode2, iscoCode3, iscoCode4, iscoLabel1, iscoLabel2, iscoLabel3, iscoLabel4, locale, creationDate, lastUpdate, postalcode, country, gender, birthdate, nationality, work_years, num_work, min_work_years, max_work_years, mean_work_years, is_employed, eqf_level, eqf_previous, is_student, headline_type, headline_isco.
Type of data: data.table, data.frame.
Dimensions: 1139016, 24.
Column Names: id, locale, creationDate, lastUpdate, postalcode, country, gender, birthdate, nationality, work_years, num_work, min_work_years, max_work_years, mean_work_years, is_employed, eqf_level, eqf_previous, is_student, headline_type, headline_isco, skillCode, type, category, skillTitle.
1.load.data.R completed in 19.9 seconds
setnames(demographStat, 
    c("birthdate", "eqf_level", "work_years", "num_work", "headline_type"), 
    c("birth_year", "eqf_highest", "total_work_years", "num_jobs", "headline_job")
)
setnames(educationStat, 
    c("birthYear", "from", "to", "organisationCountry", "eqf_level", "edu_field", "institution_name"), 
    c("birth_year", "enrollment_year", "graduation_year", "institution_country", "eqf_granted", "education_field", "institution")
)
setnames(workStat,
    c("from", "to", "occupationTitle", "iscoLabel3"),
    c("recruitment_year", "termination_year", "latest_job_esco", "latest_job_isco")
)

charCols <- c("birth_year", "eqf_highest", "eqf_previous")
demographStat[, (charCols) := lapply(.SD, as.character), .SDcols = charCols]
charCols <- c("eqf_granted", "enrollment_year", "graduation_year", "birth_year")
educationStat[, (charCols) := lapply(.SD, as.character), .SDcols = charCols]
demographStat[, mean_work_years := round(mean_work_years, 1)]
demographStat[, eqf_completed := ifelse(is_student == TRUE, eqf_previous, eqf_highest)]
educationAbroad <- educationStat[, .(id, studied_abroad = (nationality != institution_country))][!duplicated(id)][order(-studied_abroad)]
demographStat <- merge(demographStat, educationAbroad, by = "id", all.x = TRUE)
workLatestJob <- workStat[order(id, -recruitment_year), .(id, latest_job_esco, latest_job_isco)][!duplicated(id)]
demographStat <- merge(demographStat, workLatestJob, by = "id", all.x = TRUE)
motherTongue <- skillsStat[category == "Linguistic.MotherTongue.Label", .(id, mother_tongue = skillTitle)]
demographStat <- merge(demographStat, motherTongue, by = "id", all.x = TRUE)
educationStat <- merge(educationStat, demographStat[, .(id, gender, mother_tongue, eqf_highest, headline_job, headline_isco)], by = "id")
2.process.data.R completed in 9.82 seconds
Collecting aggregate education statistics by qualifications response.
aggregate_cols <- c(
    "eqf_granted", "institution", "institution_country", "enrollment_year", "graduation_year",
    "locale", "country", "birth_year", "gender", "nationality", "mother_tongue", "eqf_highest"
)
qualifications_aggregate <- educationStat[!duplicated(index), .SD, .SDcols = aggregate_cols]
## NULL
3.process.data.R completed in 1.92 seconds
Collecting aggregate data on the education fields using Qualifications responses.
aggregate_cols <- c(
    "education_field", "eqf_granted", "institution", "institution_country", "enrollment_year", "graduation_year",
    "locale", "country", "birth_year", "gender", "nationality", "mother_tongue", "headline_job", "headline_isco", "eqf_highest"
)
education_fields_aggregate <- educationStat[, .SD, .SDcols = aggregate_cols]
## NULL
4.process.data.R completed in 5.43 seconds
fst format

filename <- "jobsOutput/code_book/aggregations/qualifications_aggregate.fst"
saveBinary(qualifications_aggregate, filename, format = "fst")
Datasource : /data/generic/jobsOutput/code_book/aggregations/qualifications_aggregate.fst of 24,675,921 bytes.
filename <- "jobsOutput/code_book/aggregations/education_fields_aggregate.fst"
saveBinary(education_fields_aggregate, filename, format = "fst")
Datasource : /data/generic/jobsOutput/code_book/aggregations/education_fields_aggregate.fst of 87,192,592 bytes.
csv format

filename <- "/data/tmpfs/results/survey_data/csv/qualifications_aggregate.zip"
fwrite_zip(qualifications_aggregate, filename)
Datasource : /data/tmpfs/results/survey_data/csv/qualifications_aggregate.zip of 4,020,416 bytes.
filename <- "/data/tmpfs/results/survey_data/csv/education_fields_aggregate.zip"
fwrite_zip(education_fields_aggregate, filename)
Datasource : /data/tmpfs/results/survey_data/csv/education_fields_aggregate.zip of 17,458,908 bytes.
5.save.data.R completed in 34.37 seconds
Completed in 73.6 seconds.