Here, we apply the pre-trained model and our fine-tuned model to data not used for training — a holdout sample collected by Bainbridge et al. (2022).

# Chunk defaults: show code, keep knitting through errors, and suppress
# package startup messages and warnings in the rendered document.
knitr::opts_chunk$set(echo = TRUE, error = TRUE, message = FALSE, warning = FALSE)

# Libraries and Settings

# Libs ---------------------------
library(knitr)
library(tidyverse)
library(arrow)
library(glue)
library(psych)
library(lavaan)
library(ggplot2)
library(plotly)
library(gridExtra)
library(broom)
library(broom.mixed)
library(brms)
library(tidybayes)
library(cmdstanr)
library(cowplot)

# Parallel chains on all cores, Stan via cmdstanr, and cached brms fits
# that are only re-estimated when the model specification changes.
options(mc.cores = parallel::detectCores(),
        brms.backend = "cmdstanr",
        brms.file_refit = "on_change")


# Fine-tuned embedding model whose synthetic correlations we evaluate
model_name <- "ItemSimilarityTraining-20240502-trial12"
# model_name <- "item-similarity-20231018-122504"
# Off-the-shelf sentence transformer used as the pre-trained baseline
pretrained_model_name <- "all-mpnet-base-v2"

data_path <- glue("./")
pretrained_data_path <- glue("./")

# Reproducibility for the random-scale simulation below
set.seed(42)


# Synthetic inter-item correlations on the holdout sample: fine-tuned model
# and pre-trained baseline.
holdout <- arrow::read_feather(file = file.path(data_path, glue("ignore.{model_name}.raw.osf-bainbridge-2021-s2-0.item_correlations.feather")))

pt_holdout <- arrow::read_feather(file = file.path(data_path, glue("ignore.{pretrained_model_name}.raw.osf-bainbridge-2021-s2-0.item_correlations.feather")))

# Item metadata (instrument / scale / subscale); harmonize column names.
holdout_mapping_data <- arrow::read_feather(
  file = file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.mapping2.feather"))
) %>%
  rename(scale_0 = scale0,
         scale_1 = scale1)

# Raw human responses (one column per item).
holdout_human_data <- arrow::read_feather(
  file = file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.human.feather"))
)

holdout_scales <- arrow::read_feather(file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.scales.feather"))
)

# N = smallest per-item count of non-missing responses (worst case);
# total_N = number of respondents. (across() replaces superseded summarise_all.)
N <- holdout_human_data %>%
  summarise(across(everything(), ~ sum(!is.na(.x)))) %>%
  min()
total_N <- nrow(holdout_human_data)

The Bainbridge data was collected on N=493 respondents. The item with the most missing values still had n=480.

Synthetic inter-item correlations

# Attach instrument/scale/subscale metadata for both items of each pair.
# The same pipeline was previously duplicated for the fine-tuned and the
# pre-trained correlations; factored into a helper. Joins are implicit
# natural joins keyed by the renamed variable_1 / variable_2 columns.
add_item_metadata <- function(pairs) {
  pairs %>%
    left_join(holdout_mapping_data %>%
                select(variable_1 = variable, InstrumentA = instrument,
                       ScaleA = scale_0, SubscaleA = scale_1)) %>%
    left_join(holdout_mapping_data %>%
                select(variable_2 = variable, InstrumentB = instrument,
                       ScaleB = scale_0, SubscaleB = scale_1))
}

holdout_llm <- add_item_metadata(holdout)

pt_holdout_llm <- add_item_metadata(pt_holdout)

Accuracy

# Mean squared standard error of the empirical correlations; used below to
# fix the error variance of empirical_r in the SEM.
se2 <- mean(holdout_llm$empirical_r_se^2)

# Manifest accuracy: Pearson correlation of empirical with synthetic
# inter-item correlations (fine-tuned model and pre-trained baseline).
r <- broom::tidy(cor.test(holdout_llm$empirical_r, holdout_llm$synthetic_r))
pt_r <- broom::tidy(cor.test(pt_holdout_llm$empirical_r, pt_holdout_llm$synthetic_r))

# Single-indicator SEM: empirical_r loads on a latent true correlation whose
# error variance is fixed to the known mean squared SE, so the covariance
# with synthetic_r is disattenuated for sampling error in empirical_r.
model <- paste0('
  # Latent variables
  PearsonLatent =~ 1*empirical_r

  # Fixing error variances based on known standard errors
  empirical_r ~~ ',se2,'*empirical_r

  # Relationship between latent variables
  PearsonLatent ~~ synthetic_r
')

fit <- sem(model, data = holdout_llm)
pt_fit <- sem(model, data = pt_holdout_llm)

# Bayesian errors-in-variables regression: known measurement error on the
# outcome via mi(), a multi-membership intercept for the two items of each
# pair, and a cubic polynomial on log-sigma for heteroscedastic residuals.
# The fit is cached on disk via file =.
m_synth_r_items <- brm(
  bf(empirical_r | mi(empirical_r_se) ~ synthetic_r + (1|mm(variable_1, variable_2)),
     sigma ~ poly(synthetic_r, degree = 3)), data = holdout_llm, 
  file = "ignore/m_synth_r_items_lm")

sd_synth <- sd(m_synth_r_items$data$synthetic_r)

# Per posterior draw, compare explained (epred) to total (prediction)
# variance; sqrt of their ratio is a semi-latent accuracy coefficient.
newdata <- m_synth_r_items$data %>% select(empirical_r, synthetic_r, empirical_r_se)
epreds <- epred_draws(newdata = newdata, obj = m_synth_r_items, re_formula = NA, ndraws = 200)
preds <- predicted_draws(newdata = newdata, obj = m_synth_r_items, re_formula = NA, ndraws = 200)
epred_preds <- epreds %>% left_join(preds)
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))
# Free the large joined draws object
rm(epred_preds)

# Posterior mean and 95% HDCI of the semi-latent accuracy
accuracy_bayes_items <- by_draw %>% mean_hdci(semi_latent_r)

# Assemble the accuracy summary table: manifest correlations plus
# semi-latent (measurement-error-adjusted) estimates for both models.
accuracy_table_rows <- list(
  pt_r %>%
    mutate(model = "pre-trained", kind = "manifest") %>%
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(pt_fit) %>%
    filter(lhs == "PearsonLatent", rhs == "synthetic_r") %>%
    mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>%
    select(model, kind, accuracy = est.std,
           conf.low = ci.lower, conf.high = ci.upper),
  r %>%
    mutate(model = "fine-tuned", kind = "manifest") %>%
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(fit) %>%
    filter(lhs == "PearsonLatent", rhs == "synthetic_r") %>%
    mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>%
    select(model, kind, accuracy = est.std,
           conf.low = ci.lower, conf.high = ci.upper),
  accuracy_bayes_items %>%
    mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>%
    select(model, kind, accuracy = semi_latent_r,
           conf.low = .lower, conf.high = .upper)
)

bind_rows(accuracy_table_rows) %>%
  knitr::kable(digits = 2)
model kind accuracy conf.low conf.high
pre-trained manifest 0.19 0.18 0.19
pre-trained semi-latent (SEM) 0.19 0.19 0.20
fine-tuned manifest 0.67 0.67 0.68
fine-tuned semi-latent (SEM) 0.70 0.70 0.70
fine-tuned semi-latent (Bayesian EIV) 0.71 0.71 0.72

Prediction error plot according to synthetic estimate

# Print the fitted errors-in-variables model (console output kept below).
m_synth_r_items
##  Family: gaussian 
##   Links: mu = identity; sigma = log 
## Formula: empirical_r | mi(empirical_r_se) ~ synthetic_r + (1 | mm(variable_1, variable_2)) 
##          sigma ~ poly(synthetic_r, degree = 3)
##    Data: holdout_llm (Number of observations: 87153) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Group-Level Effects: 
## ~mmvariable_1variable_2 (Number of levels: 418) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.03      0.00     0.03     0.03 1.00     1359     2164
## 
## Population-Level Effects: 
##                                 Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                          -0.01      0.00    -0.01    -0.00 1.00
## sigma_Intercept                    -2.20      0.00    -2.21    -2.20 1.00
## synthetic_r                         0.79      0.00     0.78     0.80 1.00
## sigma_polysynthetic_rdegreeEQ31    19.57      0.97    17.69    21.46 1.00
## sigma_polysynthetic_rdegreeEQ32    -0.50      0.81    -2.07     1.04 1.00
## sigma_polysynthetic_rdegreeEQ33    -4.86      0.79    -6.40    -3.32 1.00
##                                 Bulk_ESS Tail_ESS
## Intercept                           1175     1946
## sigma_Intercept                     4604     3177
## synthetic_r                         5015     2985
## sigma_polysynthetic_rdegreeEQ31     4931     3401
## sigma_polysynthetic_rdegreeEQ32     6335     3209
## sigma_polysynthetic_rdegreeEQ33     6573     3101
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Posterior predictive intervals for the scatter plot below
pred <- conditional_effects(m_synth_r_items, method = "predict")
# Average residual sd (prediction error) across draws
by_draw %>% mean_hdci(sigma)
## # A tibble: 1 × 6
##   sigma .lower .upper .width .point .interval
##   <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 0.111  0.110  0.113   0.95 mean   hdci
# Prediction error as a function of the synthetic correlation
plot(conditional_effects(m_synth_r_items, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  ylab("Prediction error (sigma)")

Scatter plot

# Scatter plot of synthetic vs. empirical inter-item correlations with the
# model-implied 95% prediction band overlaid. Uses left-assignment instead
# of the original trailing `->`.
plot_items <- ggplot(holdout_llm, aes(synthetic_r, empirical_r,
              ymin = empirical_r - empirical_r_se,
              ymax = empirical_r + empirical_r_se)) +
  # identity line: perfect agreement
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.1, size = 1) +
  # prediction band from the brms model (conditional_effects output)
  geom_smooth(aes(
    x = synthetic_r,
    y = estimate__,
    ymin = lower__,
    ymax = upper__,
  ), stat = "identity",
  color = "#a48500",
  fill = "#EDC951",
  data = as.data.frame(pred$synthetic_r)) +
  xlab("Synthetic inter-item correlation") +
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1))
plot_items

Interactive plot

This plot shows only 2000 randomly selected item pairs to conserve memory. A full interactive plot exists, but may react slowly.

# Attach the item texts of both pair members (implicit joins on the renamed
# variable_1 / variable_2 keys).
item_pair_table <- holdout_llm %>% 
   left_join(holdout_mapping_data %>% select(variable_1 = variable,
                                             item_text_1 = item_text)) %>% 
   left_join(holdout_mapping_data %>% select(variable_2 = variable,
                                             item_text_2 = item_text))

# item_pair_table %>% filter(str_length(item_text_1) < 30, str_length(item_text_2) < 30) %>% 
#   left_join(pt_holdout_llm %>% rename(synthetic_r_pt = synthetic_r)) %>% 
#   select(item_text_1, item_text_2, empirical_r, synthetic_r, synthetic_r_pt) %>% View()
# Full (unrounded) table kept on disk for later use
rio::export(item_pair_table, "ignore/item_pair_table.feather")

# Interactive scatter of a random subset of 2000 pairs; hover label shows
# both item texts (underscores replaced by spaces).
(item_pair_table %>% 
  mutate(synthetic_r = round(synthetic_r, 2),
         empirical_r = round(empirical_r, 2),
         items = str_replace_all(str_c(item_text_1, "\n", item_text_2),
                                  "_+", " ")) %>% 
    sample_n(2000) %>%
ggplot(., aes(synthetic_r, empirical_r, 
              # ymin = empirical_r - empirical_r_se, 
              # ymax = empirical_r + empirical_r_se, 
              label = items)) + 
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-item correlation") + 
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1))) %>% 
  ggplotly()
# Human-readable export: correlations formatted as strings with SEs
item_pair_table <- item_pair_table %>% 
  mutate(empirical_r = sprintf("%.2f±%.3f", empirical_r,
                                 empirical_r_se),
           synthetic_r = sprintf("%.2f", synthetic_r)) %>% 
  select(item_text_1, item_text_2, empirical_r, synthetic_r)
rio::export(item_pair_table, "item_pair_table.xlsx")

Robustness checks

Is the accuracy lower within/across scales and instruments?

# Accuracy within/across instruments, scales, and subscales.
# Note the fourth if_else() argument: pairs with missing metadata are coded
# as 0 ("different") rather than propagating NA.
holdout_llm %>% 
  mutate(same_instrument = if_else(InstrumentA == InstrumentB, 1, 0,0),
         same_scale = if_else(ScaleA == ScaleB, 1,0,0),
         same_subscale = if_else(same_scale & SubscaleA == SubscaleB, 1,0,0)) %>% 
  group_by(same_scale, same_instrument, same_subscale) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(same_instrument, same_scale, same_subscale, r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  arrange(same_instrument, same_scale, same_subscale) %>% 
  kable()
same_instrument same_scale same_subscale r conf.low conf.high n sd_emp_r
0 0 0 0.6525977 0.6484796 0.6566776 75364 0.1537790
0 1 0 0.8269917 0.8095247 0.8429959 1376 0.2801202
0 1 1 0.8269181 0.7293619 0.8915144 64 0.4934840
1 0 0 0.6005392 0.5855629 0.6151056 7200 0.1365890
1 1 0 0.7571886 0.7404990 0.7729447 2662 0.2582187
1 1 1 0.8372433 0.8085383 0.8619731 487 0.3847884

Is the accuracy lower outside classic Big Five?

# Accuracy by whether both, either, or neither item of a pair stems from a
# Big Five / personality instrument (regex match on the instrument name).
holdout_llm %>%
  mutate(
    a_is_b5 = str_detect(InstrumentA, "(Personality|Big Five)"),
    b_is_b5 = str_detect(InstrumentB, "(Personality|Big Five)"),
    big_five = case_when(
      a_is_b5 & b_is_b5 ~ "both",
      a_is_b5 | b_is_b5 ~ "either",
      TRUE ~ "none"
    )
  ) %>%
  group_by(big_five) %>%
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>%
  select(big_five, r = estimate, conf.low, conf.high, n, sd_emp_r) %>%
  arrange(big_five) %>%
  kable()
big_five r conf.low conf.high n sd_emp_r
both 0.7105135 0.7027822 0.7180770 16110 0.1754473
either 0.6618969 0.6565425 0.6671846 42840 0.1525947
none 0.6653591 0.6588041 0.6718132 28203 0.1697589

Is the accuracy lower for items that have low variance?

# Per-item standard deviations of the human responses, in long format.
# across() replaces superseded summarise_all(); TRUE instead of T.
item_variances <- holdout_human_data %>%
  summarise(across(everything(), ~ sd(.x, na.rm = TRUE))) %>%
  pivot_longer(everything(), names_to = "variable", values_to = "item_sd")

# Bin item pairs by their maximum possible covariance (product of the two
# item SDs), rounded up to the next 0.1.
by_max_cov <- holdout_llm %>% 
  left_join(item_variances, by = c("variable_1" = "variable")) %>% 
  left_join(item_variances, by = c("variable_2" = "variable"), suffix = c("_1", "_2")) %>% 
  mutate(max_covariance = ceiling((item_sd_1 * item_sd_2)*10)/10)

# Accuracy per covariance bin (bins with <= 3 pairs dropped).
rs_by_max_cov <- by_max_cov %>% 
  group_by(max_covariance) %>% 
  filter(n() > 3) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(max_covariance, r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  arrange(max_covariance)

rs_by_max_cov %>% 
  kable()
max_covariance r conf.low conf.high n sd_emp_r
0.5 0.8845860 0.6925673 0.9595392 16 0.0841287
0.6 0.5394056 0.4566420 0.6129128 319 0.1712504
0.7 0.5773367 0.5485306 0.6047739 2162 0.1591129
0.8 0.6331256 0.6192313 0.6466235 7355 0.1524384
0.9 0.6439129 0.6346448 0.6529958 15639 0.1496275
1.0 0.6333785 0.6255374 0.6410916 22779 0.1536605
1.1 0.6791602 0.6718518 0.6863364 21261 0.1657358
1.2 0.7290679 0.7206660 0.7372557 12257 0.1799820
1.3 0.7435976 0.7298785 0.7567179 4268 0.1774902
1.4 0.7194644 0.6869514 0.7491027 930 0.1587180
1.5 0.7077049 0.6188080 0.7787043 154 0.1447413
1.6 0.8896940 0.6452603 0.9688857 12 0.1835193
# Accuracy per covariance bin with 95% confidence intervals
ggplot(rs_by_max_cov,
       aes(max_covariance, r, ymin = conf.low, ymax = conf.high)) +
  geom_pointrange()

# Overall accuracy after excluding the lowest-covariance bins (<= 0.7)
high_cov_pairs <- by_max_cov %>%
  filter(max_covariance > 0.7)
high_cov_pairs %>%
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>%
  kable()
estimate statistic p.value parameter conf.low conf.high method alternative sd_emp_r n
0.6758603 266.806 0 84654 0.6721843 0.6795029 Pearson’s product-moment correlation two.sided 0.1624048 84656
# Accuracy for pairs whose maximum covariance exceeds 1. Reuses by_max_cov
# (computed above) instead of repeating the two item_variances joins and the
# max_covariance computation — same result, no duplicated pipeline.
by_max_cov %>% 
  filter(max_covariance > 1) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  knitr::kable()
r conf.low conf.high n sd_emp_r
0.7085118 0.7035267 0.7134273 38883 0.1726419

Is the accuracy lower for the pre-trained model?

# Scatter plot for the pre-trained baseline (no model prediction band).
# Left-assignment instead of the original trailing `->`.
pt_plot_items <- ggplot(pt_holdout_llm, aes(synthetic_r, empirical_r,
              ymin = empirical_r - empirical_r_se,
              ymax = empirical_r + empirical_r_se)) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.1, size = 1) +
  xlab("Synthetic inter-item correlation") +
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1))
pt_plot_items

Full table of synthetic and empirical item pair correlations

Synthetic Reliabilities

# Convert an item-pair edge list into a symmetric correlation matrix with a
# unit diagonal, via an undirected igraph adjacency matrix. This pipeline
# was previously written out three times; factored into a helper.
pairs_to_cor_matrix <- function(pairs, value) {
  m <- pairs %>%
    select(x = variable_1, y = variable_2, r = all_of(value)) %>%
    as.data.frame() |>
    igraph::graph_from_data_frame(directed = FALSE) |>
    igraph::as_adjacency_matrix(attr = "r", sparse = FALSE)
  diag(m) <- 1
  m
}

# Synthetic correlations, fine-tuned model
cors_llm <- pairs_to_cor_matrix(holdout_llm, "synthetic_r")

# Synthetic correlations, pre-trained baseline
pt_cors_llm <- pairs_to_cor_matrix(pt_holdout_llm, "synthetic_r")

# Empirical correlations
cors_real <- pairs_to_cor_matrix(holdout_llm, "empirical_r")

mapping_data <- holdout_mapping_data
# Map items to scales: scales without a subscale (scale_1 == "") join on
# instrument + scale_0 only; subscale rows join on all three keys.
items_by_scale <- bind_rows(
  holdout_scales %>% filter(scale_1 == "") %>% left_join(mapping_data %>% select(-scale_1), by = c("instrument", "scale_0")),
  holdout_scales %>% filter(scale_1 != "") %>% left_join(mapping_data, by = c("instrument", "scale_0", "scale_1"))
)
  
# One row per scale: item list, item count, and a lavaan-style latent
# variable definition string (lvn).
scales <- items_by_scale %>%
  group_by(keyed, scale) %>%
  summarise(
    items = list(variable),
    number_of_items = n_distinct(variable),
    lvn = paste(first(scale), " =~ ", paste(variable, collapse = " + "))) %>%
  drop_na() %>% 
  ungroup()


# Benchmark scales: 200 random item sets whose sizes are drawn from a
# Poisson distribution matched to the real scales' mean length, floored at
# 3 items (the minimum for Cronbach's alpha). Preallocated list and
# seq_along() replace the growing list and 1:200; max() replaces the
# scalar if_else(). The RNG call sequence is unchanged, so results are
# identical under the same seed.
random_scales <- vector("list", 200)
for (i in seq_along(random_scales)) {
  n_items <- rpois(1, mean(scales$number_of_items))
  n_items <- max(n_items, 3L)
  random_scales[[i]] <- holdout_mapping_data %>%
    sample_n(n_items) %>%
    mutate(scale = paste0("random", i)) %>%
    group_by(scale) %>%
    summarise(
      items = list(variable),
      number_of_items = n_distinct(variable),
      lvn = paste(first(scale), " =~ ", paste(variable, collapse = " + "))) %>%
    drop_na() %>% 
    mutate(keyed = 1)  # random scales are treated as positively keyed
}

random_scales <- bind_rows(random_scales)
scales <- bind_rows(scales, random_scales)

# Provides find_reverse_items_by_first_item() and reverse_items() used below
source("global_functions.R")

# Cronbach's alpha is only meaningful for scales with at least 3 items
scales <- scales %>% filter(number_of_items >= 3)

# For each scale: extract the item submatrices from the empirical and the
# two synthetic correlation matrices, determine reverse-keyed items once
# from the empirical matrix, apply the same reversal to all three, then
# compute alpha (with Feldt CI information) from each matrix.
scales <- scales %>%
  rowwise() %>%
  mutate(r_real = list(cors_real[items, items]),
         pt_r_llm = list(pt_cors_llm[items, items]),
         r_llm = list(cors_llm[items, items])) %>%
  mutate(reverse_items = list(find_reverse_items_by_first_item(r_real, keyed)),
         r_real_rev = list(reverse_items(r_real, reverse_items)),
         pt_r_llm_rev = list(reverse_items(pt_r_llm, reverse_items)),
         r_llm_rev = list(reverse_items(r_llm, reverse_items))) %>%
  mutate(
    rel_real = list(psych::alpha(r_real_rev, keys = F, n.obs = N)$feldt),
    rel_llm = list(psych::alpha(r_llm_rev, keys = F, n.obs = N)$feldt),
    rel_pt_llm = list(psych::alpha(pt_r_llm_rev, keys = F, n.obs = N)$feldt)) %>%
  # NOTE(review): raw_alpha is pulled from the stored $feldt object here —
  # confirm against psych::alpha()'s return structure.
  mutate(empirical_alpha = rel_real$alpha$raw_alpha,
         synthetic_alpha = rel_llm$alpha$raw_alpha,
         pt_synthetic_alpha = rel_pt_llm$alpha$raw_alpha) %>%
  mutate(
    # SE approximated as the mean half-width of the 95% CI around alpha
    empirical_alpha_se = mean(diff(unlist(psychometric::alpha.CI(empirical_alpha, k = number_of_items, N = N, level = 0.95))))
  )

# Drop degenerate scales with non-positive empirical alpha
scales <- scales %>% filter(empirical_alpha > 0)
# qplot(scales$empirical_alpha_se)
# qplot(scales$empirical_alpha, scales$empirical_alpha_se)
# qplot(scales$number_of_items, scales$empirical_alpha_se)
# qplot(scales$empirical_alpha, scales$empirical_alpha_se, color = scales$number_of_items)

Accuracy

# Mean squared SE of the empirical alphas; fixes the SEM error variance.
se2 <- mean(scales$empirical_alpha_se^2)
# Manifest accuracy for reliabilities: empirical vs. synthetic alpha
r <- broom::tidy(cor.test(scales$empirical_alpha, scales$synthetic_alpha))
pt_r <- broom::tidy(cor.test(scales$empirical_alpha, scales$pt_synthetic_alpha))

# Single-indicator SEM analogous to the item-level model: empirical_alpha
# loads on a latent reliability with fixed error variance.
model <- paste0('
  # Latent variables
  latent_real_rel =~ 1*empirical_alpha

  # Fixing error variances based on known standard errors
  empirical_alpha ~~ ',se2,'*empirical_alpha

  # Relationship between latent variables
  latent_real_rel ~~ synthetic_alpha
')

fit <- sem(model, data = scales)
# Pre-trained fit reuses the same model string by renaming the column
pt_fit <- sem(model, data = scales %>% 
                select(empirical_alpha, synthetic_alpha = pt_synthetic_alpha))

# Bayesian errors-in-variables regression of empirical on synthetic alpha,
# with heteroscedastic residual sd (cubic polynomial). Cached on disk.
m_lmsynth_rel_scales <- brm(
  bf(empirical_alpha | mi(empirical_alpha_se) ~ synthetic_alpha,
     sigma ~ poly(synthetic_alpha, degree = 3)), data = scales, 
  file = "ignore/m_synth_rel_lm")

# Semi-latent accuracy per draw: sqrt of explained / total variance
newdata <- m_lmsynth_rel_scales$data %>% select(empirical_alpha, synthetic_alpha, empirical_alpha_se)
epreds <- epred_draws(newdata = newdata, obj = m_lmsynth_rel_scales, re_formula = NA)
preds <- predicted_draws(newdata = newdata, obj = m_lmsynth_rel_scales, re_formula = NA)
epred_preds <- epreds %>% left_join(preds)
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))

accuracy_bayes_rels <- by_draw %>% mean_hdci(semi_latent_r)

# Reliability accuracy table: manifest and semi-latent estimates for the
# pre-trained and fine-tuned models.
reliability_table_rows <- list(
  pt_r %>%
    mutate(model = "pre-trained", kind = "manifest") %>%
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(pt_fit) %>%
    filter(lhs == "latent_real_rel", rhs == "synthetic_alpha") %>%
    mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>%
    select(model, kind, accuracy = est.std,
           conf.low = ci.lower, conf.high = ci.upper),
  r %>%
    mutate(model = "fine-tuned", kind = "manifest") %>%
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(fit) %>%
    filter(lhs == "latent_real_rel", rhs == "synthetic_alpha") %>%
    mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>%
    select(model, kind, accuracy = est.std,
           conf.low = ci.lower, conf.high = ci.upper),
  accuracy_bayes_rels %>%
    mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>%
    select(model, kind, accuracy = semi_latent_r,
           conf.low = .lower, conf.high = .upper)
)

bind_rows(reliability_table_rows) %>%
  knitr::kable(digits = 2)
model kind accuracy conf.low conf.high
pre-trained manifest 0.07 -0.04 0.18
pre-trained semi-latent (SEM) 0.08 -0.04 0.19
fine-tuned manifest 0.82 0.78 0.85
fine-tuned semi-latent (SEM) 0.87 0.83 0.90
fine-tuned semi-latent (Bayesian EIV) 0.86 0.75 0.95

Prediction error plot according to synthetic estimate

# Print the fitted reliability model (console output kept below).
m_lmsynth_rel_scales
##  Family: gaussian 
##   Links: mu = identity; sigma = log 
## Formula: empirical_alpha | mi(empirical_alpha_se) ~ synthetic_alpha 
##          sigma ~ poly(synthetic_alpha, degree = 3)
##    Data: scales (Number of observations: 303) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Population-Level Effects: 
##                                     Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                               0.25      0.01     0.22     0.28 1.00
## sigma_Intercept                        -2.35      0.06    -2.46    -2.24 1.00
## synthetic_alpha                         0.72      0.02     0.68     0.76 1.00
## sigma_polysynthetic_alphadegreeEQ31    -5.91      0.99    -7.78    -3.96 1.00
## sigma_polysynthetic_alphadegreeEQ32    -1.34      0.92    -3.09     0.53 1.00
## sigma_polysynthetic_alphadegreeEQ33    -2.81      0.83    -4.49    -1.24 1.00
##                                     Bulk_ESS Tail_ESS
## Intercept                               3860     3274
## sigma_Intercept                         2954     2904
## synthetic_alpha                         4019     3282
## sigma_polysynthetic_alphadegreeEQ31     3005     2850
## sigma_polysynthetic_alphadegreeEQ32     3919     3374
## sigma_polysynthetic_alphadegreeEQ33     4274     3355
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Posterior predictive intervals for the alpha scatter plot below
pred <- conditional_effects(m_lmsynth_rel_scales, method = "predict")
# Average prediction error; NaN draws (negative variance difference) dropped
by_draw %>% 
  filter(!is.nan(sigma)) %>% mean_hdci(sigma)
## # A tibble: 1 × 6
##   sigma .lower .upper .width .point .interval
##   <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 0.116 0.0657  0.174   0.95 mean   hdci
# Prediction error as a function of the synthetic alpha
plot(conditional_effects(m_lmsynth_rel_scales, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  scale_x_continuous(limits = c(0,1))+
  scale_y_continuous(limits = c(0,0.13)) +
  ylab("Prediction error (sigma)")

Scatter plot

# Scatter plot of synthetic vs. empirical alphas; random scales colored
# differently, model-implied 95% prediction band overlaid. Left-assignment
# instead of the original trailing `->`.
plot_rels <- ggplot(scales, aes(synthetic_alpha, empirical_alpha,
                   color = str_detect(scale, "^random"),
              ymin = empirical_alpha - empirical_alpha_se,
              ymax = empirical_alpha + empirical_alpha_se)) +
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.6, size = 1) +
  geom_smooth(aes(
    x = synthetic_alpha,
    y = estimate__,
    ymin = lower__,
    ymax = upper__,
  ), stat = "identity",
  color = "#a48500",
  fill = "#EDC951",
  data = as.data.frame(pred$synthetic_alpha)) +
  scale_color_manual(values = c("#00A0B0", "#6A4A3C"),
                     guide = "none") +
  xlab("Synthetic Cronbach's alpha") +
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  coord_fixed(xlim = c(0,1), ylim = c(0,1))
plot_rels

Interactive plot

# Interactive scatter of real (non-random) scales only; hover label shows
# the scale name (underscores replaced by spaces).
(scales %>% 
  filter(!str_detect(scale, "^random")) %>% 
  mutate(synthetic_alpha = round(synthetic_alpha, 2),
         empirical_alpha = round(empirical_alpha, 2),
         scale = str_replace_all(scale, "_+", " ")) %>% 
ggplot(., aes(synthetic_alpha, empirical_alpha, 
              # ymin = empirical_r - empirical_r_se, 
              # ymax = empirical_r + empirical_r_se, 
              label = scale)) + 
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.3, size = 1, color = "#00A0B0") +
  xlab("Synthetic Cronbach's alpha") + 
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  theme(legend.position='none') + 
  coord_fixed(xlim = c(NA,1), ylim = c(NA,1))) %>% 
  ggplotly()

Table

# Searchable table of real scales with formatted alphas (empirical shown
# with its SE) and item counts.
real_scales_table <- scales %>%
  filter(!str_detect(scale, "^random")) %>%
  mutate(
    empirical_alpha = sprintf("%.2f±%.3f", empirical_alpha,
                              empirical_alpha_se),
    synthetic_alpha = sprintf("%.2f", synthetic_alpha),
    scale = str_replace_all(scale, "_+", " ")
  ) %>%
  select(scale, empirical_alpha, synthetic_alpha, number_of_items)

DT::datatable(real_scales_table, rownames = FALSE, filter = "top")

Robustness checks

# Accuracy separately for real scales (FALSE) and random scales (TRUE);
# the grouping expression itself becomes the first output column name.
scales %>% 
  group_by(str_detect(scale, "^random")) %>% 
  summarise(broom::tidy(cor.test(synthetic_alpha, empirical_alpha)), sd_alpha = sd(empirical_alpha), n = n()) %>% 
  knitr::kable()
str_detect(scale, “^random”) estimate statistic p.value parameter conf.low conf.high method alternative sd_alpha n
FALSE 0.6317914 8.351973 0 105 0.5021686 0.7336481 Pearson’s product-moment correlation two.sided 0.0992460 107
TRUE 0.6932367 13.397440 0 194 0.6126040 0.7595885 Pearson’s product-moment correlation two.sided 0.1594832 196

Although the number of items alone can of course predict Cronbach’s alpha, the synthetic alphas explain much more variance in empirical alphas.

# How well does the number of items alone predict empirical alpha?
scales %>%
  ungroup() %>%
  summarise(
    broom::tidy(cor.test(number_of_items, empirical_alpha)),
    sd_alpha = sd(empirical_alpha),
    n = n()
  ) %>%
  knitr::kable()
estimate statistic p.value parameter conf.low conf.high method alternative sd_alpha n
0.2366839 4.226398 3.15e-05 301 0.1274036 0.3402868 Pearson’s product-moment correlation two.sided 0.2276686 303
# Baseline: empirical alpha regressed on the number of items only
summary(lm(empirical_alpha ~ number_of_items, scales))
## 
## Call:
## lm(formula = empirical_alpha ~ number_of_items, data = scales)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.47673 -0.16576 -0.01129  0.20063  0.45562 
## 
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     0.415447   0.025891  16.046  < 2e-16 ***
## number_of_items 0.014174   0.003354   4.226 3.15e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.2216 on 301 degrees of freedom
## Multiple R-squared:  0.05602,    Adjusted R-squared:  0.05288 
## F-statistic: 17.86 on 1 and 301 DF,  p-value: 3.152e-05
# Adding the synthetic alpha: number_of_items is no longer significant
summary(lm(empirical_alpha ~ number_of_items + synthetic_alpha, scales))
## 
## Call:
## lm(formula = empirical_alpha ~ number_of_items + synthetic_alpha, 
##     data = scales)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.39508 -0.07434 -0.00318  0.07120  0.76137 
## 
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     0.232096   0.017121  13.556   <2e-16 ***
## number_of_items 0.002904   0.002036   1.427    0.155    
## synthetic_alpha 0.671944   0.028294  23.749   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.1308 on 300 degrees of freedom
## Multiple R-squared:  0.6722, Adjusted R-squared:   0.67 
## F-statistic: 307.6 on 2 and 300 DF,  p-value: < 2.2e-16

Is the accuracy lower for the pre-trained model?

# Alpha scatter for the pre-trained baseline (no prediction band).
# Left-assignment instead of the original trailing `->`.
pt_plot_rels <- ggplot(scales, aes(pt_synthetic_alpha, empirical_alpha,
                   color = str_detect(scale, "^random"),
              ymin = empirical_alpha - empirical_alpha_se,
              ymax = empirical_alpha + empirical_alpha_se)) +
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.6, size = 1) +
  scale_color_manual(values = c("#00A0B0", "#6A4A3C"),
                     guide = "none") +
  xlab("Synthetic Cronbach's alpha") +
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  coord_fixed(xlim = c(0,1), ylim = c(0,1))
pt_plot_rels

Synthetic Scale Correlations

# Synthetic and empirical scale-score correlations for both models.
# `<-` instead of `=` for assignment; join indentation normalized.
manifest_scores <- arrow::read_feather(file = file.path(data_path, glue("ignore.{model_name}.raw.osf-bainbridge-2021-s2-0.scale_correlations.feather")))
pt_manifest_scores <- arrow::read_feather(file = file.path(data_path, glue("ignore.{pretrained_model_name}.raw.osf-bainbridge-2021-s2-0.scale_correlations.feather")))

n_distinct(manifest_scores$scale_a)
## [1] 112
# Attach scale metadata for both members of each scale pair
manifest_scores <- manifest_scores %>%
  left_join(scales, by = c("scale_a" = "scale")) %>%
  left_join(scales, by = c("scale_b" = "scale"))

Accuracy

# Manifest accuracy for scale-score correlations
r <- broom::tidy(cor.test(manifest_scores$empirical_r, manifest_scores$synthetic_r))
pt_r <- broom::tidy(cor.test(pt_manifest_scores$empirical_r, pt_manifest_scores$synthetic_r))

# Single-indicator SEM with error variance fixed to the mean squared SE,
# as for items and reliabilities above.
se2 <- mean(manifest_scores$empirical_r_se^2)
model <- paste0('
    # Latent variables
    PearsonLatent =~ 1*empirical_r

    # Fixing error variances based on known standard errors
    empirical_r ~~ ',se2,'*empirical_r

    # Relationship between latent variables
    PearsonLatent ~~ synthetic_r
  ')

fit <- sem(model, data = manifest_scores)
pt_fit <- sem(model, data = pt_manifest_scores)


# Bayesian errors-in-variables model with a multi-membership intercept for
# the two scales of each pair and heteroscedastic residual sd. Cached.
m_lmsynth_r_scales <- brm(
  bf(empirical_r | mi(empirical_r_se) ~ synthetic_r + (1|mm(scale_a, scale_b)),
     sigma ~ poly(synthetic_r, degree = 3)), data = manifest_scores, 
  file = "ignore/m_synth_r_scales_lm8")

sd_synth <- sd(m_lmsynth_r_scales$data$synthetic_r)


# Semi-latent accuracy per posterior draw (explained / total variance)
newdata <- m_lmsynth_r_scales$data %>% select(empirical_r, synthetic_r, empirical_r_se)
epreds <- epred_draws(newdata = newdata, obj = m_lmsynth_r_scales, re_formula = NA)
preds <- predicted_draws(newdata = newdata, obj = m_lmsynth_r_scales, re_formula = NA)
epred_preds <- epreds %>% left_join(preds)
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))

accuracy_bayes_scales <- by_draw %>% mean_hdci(semi_latent_r)

# Accuracy table for scale-score correlations: manifest and semi-latent
# estimates for both models.
bind_rows(
  pt_r %>% 
    mutate(model = "pre-trained", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(pt_fit) %>% 
    filter(lhs == "PearsonLatent", rhs ==  "synthetic_r") %>% 
    mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  r %>% 
    mutate(model = "fine-tuned", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(fit) %>% 
    filter(lhs == "PearsonLatent", rhs ==  "synthetic_r") %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  accuracy_bayes_scales %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>% 
    select(model, kind, accuracy = semi_latent_r, conf.low = .lower, conf.high = .upper)
  ) %>% 
  knitr::kable(digits = 2)
model kind accuracy conf.low conf.high
pre-trained manifest 0.33 0.30 0.35
pre-trained semi-latent (SEM) 0.33 0.31 0.35
fine-tuned manifest 0.87 0.86 0.87
fine-tuned semi-latent (SEM) 0.88 0.87 0.89
fine-tuned semi-latent (Bayesian EIV) 0.89 0.88 0.90

Prediction error plot according to synthetic estimate

m_lmsynth_r_scales
##  Family: gaussian 
##   Links: mu = identity; sigma = log 
## Formula: empirical_r | mi(empirical_r_se) ~ synthetic_r + (1 | mm(scale_a, scale_b)) 
##          sigma ~ poly(synthetic_r, degree = 3)
##    Data: manifest_scores (Number of observations: 6245) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Group-Level Effects: 
## ~mmscale_ascale_b (Number of levels: 113) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.07      0.01     0.06     0.08 1.00     1342     2191
## 
## Population-Level Effects: 
##                                 Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                           0.01      0.01     0.00     0.03 1.01
## sigma_Intercept                    -2.15      0.01    -2.17    -2.13 1.00
## synthetic_r                         0.95      0.01     0.94     0.97 1.00
## sigma_polysynthetic_rdegreeEQ31    -0.06      1.04    -2.09     2.02 1.00
## sigma_polysynthetic_rdegreeEQ32    -6.00      0.85    -7.64    -4.29 1.00
## sigma_polysynthetic_rdegreeEQ33    -0.16      0.89    -1.87     1.58 1.00
##                                 Bulk_ESS Tail_ESS
## Intercept                            822     1443
## sigma_Intercept                     6041     2887
## synthetic_r                         6720     3387
## sigma_polysynthetic_rdegreeEQ31     3704     3465
## sigma_polysynthetic_rdegreeEQ32     5654     3555
## sigma_polysynthetic_rdegreeEQ33     5773     3703
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Posterior mean and 95% HDI of the residual SD implied by the draws.
by_draw %>% mean_hdci(sigma)
## # A tibble: 1 × 6
##   sigma .lower .upper .width .point .interval
##   <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 0.118  0.111  0.124   0.95 mean   hdci
# Predictive conditional effects (reused by the scatter plot below) and a
# plot of the fitted sigma as a function of synthetic_r.
pred <- conditional_effects(m_lmsynth_r_scales, method = "predict")
plot(conditional_effects(m_lmsynth_r_scales, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  ylab("Prediction error (sigma)")

Scatter plot

# Scatter of empirical vs. synthetic inter-scale correlations, with the
# model-implied prediction band from `pred` overlaid on the identity line.
ggplot(manifest_scores, aes(synthetic_r, empirical_r, 
              ymin = empirical_r - empirical_r_se,
              ymax = empirical_r + empirical_r_se)) + 
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  geom_smooth(aes(
    x = synthetic_r,
    y = estimate__,
    ymin = lower__,
    ymax = upper__,
  ), stat = "identity", 
  color = "#a48500",
  fill = "#EDC951",
  data = as.data.frame(pred$synthetic_r)) +
  xlab("Synthetic inter-scale correlation") + 
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1)) -> plot_scales
plot_scales

Interactive plot

# Interactive version of the scale scatter: values rounded for tooltips and
# the scale-pair label attached, then passed to plotly.
(manifest_scores %>% 
  mutate(synthetic_r = round(synthetic_r, 2),
         empirical_r = round(empirical_r, 2),
         scales = str_replace_all(str_c(scale_a, "\n", scale_b),
                                  "_+", " ")) %>% 
ggplot(., aes(synthetic_r, empirical_r, 
              # ymin = empirical_r - empirical_r_se, 
              # ymax = empirical_r + empirical_r_se, 
              label = scales)) + 
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-scale correlation") + 
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1))) %>% 
  ggplotly()
Table
# Filterable table of the scale pairs with empirical r ± SE and synthetic r.
manifest_scores %>% 
                mutate(empirical_r = sprintf("%.2f±%.3f", empirical_r,
                                             empirical_r_se),
                       synthetic_r = sprintf("%.2f", synthetic_r),
                       scale_a = str_replace_all(scale_a, "_+", " "),
                       scale_b = str_replace_all(scale_b, "_+", " ")
                       ) %>% 
                select(scale_a, scale_b, empirical_r, synthetic_r) %>% 
  DT::datatable(rownames = FALSE,
                filter = "top")

Robustness checks

How does number of items across the two scales relate to accuracy?

# Accuracy (empirical ~ synthetic correlation) binned by the total number of
# items across the two scales in each pair.
by_item_number <- manifest_scores %>%
  mutate(items = number_of_items.x + number_of_items.y) %>%
  group_by(items) %>%
  summarise(broom::tidy(cor.test(empirical_r, synthetic_r)), pairwise_n = n()) 

by_item_number %>% 
  ggplot(aes(items, estimate, ymin = conf.low, ymax = conf.high)) + 
  geom_pointrange() +
  scale_y_continuous("Manifest accuracy (with 95% confidence interval)") +
  xlab("Number of items summed across scales")

# Weighted regression of accuracy on item count; weights are the inverse
# width of each bin's confidence interval.
lm(estimate ~ items, by_item_number, weights = 1/(by_item_number$conf.high-by_item_number$conf.low))
## 
## Call:
## lm(formula = estimate ~ items, data = by_item_number, weights = 1/(by_item_number$conf.high - 
##     by_item_number$conf.low))
## 
## Coefficients:
## (Intercept)        items  
##    0.845728     0.002668
# Accuracy restricted to pairs where both scales have at least 10 items.
manifest_scores %>%
  filter(number_of_items.x >= 10, number_of_items.y >= 10) %>%
  summarise(cor = cor(empirical_r, synthetic_r), n())
## # A tibble: 1 × 2
##     cor `n()`
##   <dbl> <int>
## 1 0.926   300

Is the accuracy lower for the pre-trained model?

# Pre-trained model: scale-level scatter without a prediction band.
ggplot(pt_manifest_scores, aes(synthetic_r, empirical_r, 
              ymin = empirical_r - empirical_r_se,
              ymax = empirical_r + empirical_r_se)) + 
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-scale correlation") + 
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1)) -> pt_plot_scales
pt_plot_scales

Combined plot

library(patchwork)
# Annotated versions of the six panels. All annotation coordinates below are
# hand-tuned to the pilot data; segment endpoints point at specific item/scale
# pairs in the respective scatter plots.
pt_plot_items2 <- pt_plot_items +
    annotate("text", size = 2.5, x = 0.5, y = -0.8, vjust = 1, hjust = 1, label = "r(I fear for the worst,\nI never worry about anything)", color = "#00A0B0") +
  annotate("segment", x = 0, y = -0.78, xend = 0.2761906, yend = -0.3459711, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = 0.5, hjust = 1, label = "r(I get angry easily,\nI lose my temper)", color = "#00A0B0") + 
  annotate("segment", x = -.1, y = 0.5, xend = 0.6935711, yend = 0.7140546, color = "#00A0B0", alpha = 0.7)
  

# Fine-tuned item panel also shows the Bayesian EIV accuracy estimate.
plot_items2 <- plot_items +
  annotate("text", size = 3, x = -1, y = 0.98, vjust = 0, hjust = 0, label = with(accuracy_bayes_items, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = 0.5, y = -0.8, vjust = 1, hjust = 1, label = "r(I fear for the worst,\nI never worry about anything)", color = "#00A0B0") +
  annotate("segment", x = 0, y = -0.78, xend = -0.1104686, yend = -0.3459711, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = 0.5, hjust = 1, label = "r(I get angry easily,\nI lose my temper)", color = "#00A0B0") + 
  annotate("segment", x = -.1, y = 0.5, xend = 0.8031979, yend = 0.7140546, color = "#00A0B0", alpha = 0.7)
  # annotate("text", size = 2.5, x = -0.5, y = -0.8, hjust = 0, label = "r(I love life,\nI avoid crowds)", color = "#00A0B0") +
  # annotate("segment", x = -.3, y = -0.7, xend = -0.23, yend = -0.28, color = "#00A0B0", alpha = 0.7) +
  # annotate("text", size = 2.5, x = -.3, y = 0.5, hjust = 1, label = "r(I work hard,\nI am diligent)", color = "#00A0B0") +
  # annotate("segment", x = -.3, y = .5, xend = .52, yend = .58, color = "#00A0B0", alpha = 0.7)


pt_plot_rels2 <- pt_plot_rels + 
  annotate("text", size = 2.5, x = 0.61, y = 0.25, hjust = 0, label = "IPIP Extraversion", color = "#00A0B0") +
  annotate("segment", x = 0.7, y = 0.27, xend = 0.9, yend = 0.90, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -0.02, y = 0.95, hjust = 0, label = "LOT Optimism", color = "#00A0B0") +
  annotate("segment", x = 0.11, y = 0.93, xend = -1.477226, yend = 0.71, color = "#00A0B0", alpha = 0.7)

plot_rels2 <- plot_rels + 
  annotate("text", size = 3, x = 0, y = .985, vjust = 0, hjust = 0, label = with(accuracy_bayes_rels, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = 0.4, y = 0.1, hjust = 0, label = "randomly formed scales", color = "#6A4A3C") +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.39, yend = 0.4864297, color = "#6A4A3C", alpha = 0.7) +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.285, yend = 0.2279919, color = "#6A4A3C", alpha = 0.7) +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.2, yend = 0.075, color = "#6A4A3C", alpha = 0.7) +
  annotate("text", size = 2.5, x = 0.61, y = 0.4, hjust = 0, label = "IPIP Extraversion", color = "#00A0B0") +
  annotate("segment", x = 0.75, y = 0.42, xend = 0.87, yend = 0.90, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -0.02, y = 0.86, hjust = 0, label = "LOT Optimism", color = "#00A0B0") +
  annotate("segment", x = 0.11, y = 0.83, xend = 0.15, yend = 0.71, color = "#00A0B0", alpha = 0.7)

pt_plot_scales2 <- pt_plot_scales +
  annotate("text", size = 2.5, x = -0.2, y = 0.8, hjust = 1, label = "r(BFI Neuroticism,\nIPIP Neuroticism)", color = "#00A0B0") +
  annotate("segment", x = -.2, y = 0.8, xend = 0.22, yend = .84, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = -0.9, hjust = 0, label = "r(BFI Depression facet,\nLOT Optimism)", color = "#00A0B0") +
  annotate("segment", x = -.1, y = -.9, xend = -0.17, yend = -.63, color = "#00A0B0", alpha = 0.7)


plot_scales2 <- plot_scales +
  annotate("text", size = 3, x = -1, y = 0.98, vjust = 0, hjust = 0, label = with(accuracy_bayes_scales, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = -0.1, y = 0.5, hjust = 1, label = "r(BFI Neuroticism,\nIPIP Neuroticism)", color = "#00A0B0") +
  annotate("segment", x = -.1, y = 0.5, xend = .84, yend = .84, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.15, y = -0.7, hjust = 0, label = "r(BFI Depression facet,\nLOT Optimism)", color = "#00A0B0") +
  annotate("segment", x = -.15, y = -.7, xend = -.34, yend = -.63, color = "#00A0B0", alpha = 0.7)


# Arrange pre-trained panels on top, fine-tuned panels below (patchwork).
(pt_plot_items2+
    pt_plot_rels2 + ggtitle("") +
    pt_plot_scales2) /


(plot_items2 +
    plot_rels2 + ggtitle("SurveyBot 3000")  +
    plot_scales2) +
  plot_annotation(
  title = 'Pre-trained model before domain adaptation and fine-tuning'
)

# Export both a PDF (cairo for unicode ±) and a PNG of the combined figure.
ggsave("ignore/Figure_pilot.pdf", width = 8.3, height = 6, device = grDevices::cairo_pdf)
ggsave("ignore/Figure_pilot.png", width = 8.3, height = 6)



# ggsave("ignore/Figure_pilot.svg", width = 8.3, height = 5.5, device = svglite::svglite)
---
title: "Language models accurately infer correlations between psychological items and scales from text alone"
date: "2023-11-07"
output: 
  html_document:
    toc: true
    toc_float: true
---

```{css, echo=FALSE}
summary {
  display: block;
  margin-bottom: 2em;
}

summary h3, summary h4 {
  display: list-item;
}
```

Here, we apply the pre-trained model and our fine-tuned model to data not used for training, a holdout. The holdout sample was collected by Bainbridge et al. 2022.

```{r warning=F,message=F}
# Chunk defaults: echo code, keep knitting on errors, silence messages/warnings.
# (TRUE/FALSE spelled out — T/F are reassignable and discouraged.)
knitr::opts_chunk$set(echo = TRUE, error = TRUE, message = FALSE, warning = FALSE)

# Libraries and Settings

# Libs ---------------------------
library(knitr)
library(tidyverse)
library(arrow)
library(glue)
library(psych)
library(lavaan)
library(ggplot2)
library(plotly)
library(gridExtra)
library(broom)
library(broom.mixed)
library(brms)
library(tidybayes)
library(cmdstanr)
library(cowplot)

# Parallel chains, cmdstanr backend, and refit-on-change caching for brms.
options(mc.cores = parallel::detectCores(), 
        brms.backend = "cmdstanr", 
        brms.file_refit = "on_change")


# Fine-tuned model run to evaluate; the commented-out name is an earlier run.
model_name <- "ItemSimilarityTraining-20240502-trial12"
# model_name <- "item-similarity-20231018-122504"
pretrained_model_name <- "all-mpnet-base-v2"

data_path <- glue("./")
pretrained_data_path <- glue("./")

set.seed(42)

# Synthetic inter-item correlations for the fine-tuned and pre-trained models.
holdout <- arrow::read_feather(file = file.path(data_path, glue("ignore.{model_name}.raw.osf-bainbridge-2021-s2-0.item_correlations.feather")))

pt_holdout <- arrow::read_feather(file = file.path(data_path, glue("ignore.{pretrained_model_name}.raw.osf-bainbridge-2021-s2-0.item_correlations.feather")))

# Item metadata (instrument/scale assignment per item variable).
holdout_mapping_data <- arrow::read_feather(
  file = file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.mapping2.feather"))
) %>%
  rename(scale_0 = scale0,
         scale_1 = scale1)

# Raw human responses (one column per item) and scale definitions.
holdout_human_data <- arrow::read_feather(
  file = file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.human.feather"))
)

holdout_scales <- arrow::read_feather(file.path(data_path, glue("{model_name}.raw.osf-bainbridge-2021-s2-0.scales.feather"))
)

# N = smallest per-item non-missing n; total_N = number of respondents.
N <- holdout_human_data %>% summarise_all(~ sum(!is.na(.))) %>% min()
total_N <- nrow(holdout_human_data)
```

The Bainbridge data was collected on N=`r total_N` respondents. The item with the most missing values still had n=`r N`.


## Synthetic inter-item correlations
```{r}
# Attach instrument/scale metadata for both items of each pair.
# The identical join spec was previously duplicated for the fine-tuned and
# pre-trained correlations; it is factored into one helper so the two stay
# in sync. Joins rely on the shared `variable_1`/`variable_2` key columns.
join_item_metadata <- function(item_correlations) {
  item_correlations %>%
    left_join(holdout_mapping_data %>% select(variable_1 = variable, InstrumentA = instrument, ScaleA = scale_0, SubscaleA = scale_1)) %>%
    left_join(holdout_mapping_data %>% select(variable_2 = variable, InstrumentB = instrument, ScaleB = scale_0, SubscaleB = scale_1))
}

holdout_llm <- join_item_metadata(holdout)

pt_holdout_llm <- join_item_metadata(pt_holdout)
```


### Accuracy
```{r}
# Mean squared standard error of the empirical correlations; used below to
# fix the measurement-error variance of empirical_r in the SEM.
se2 <- mean(holdout_llm$empirical_r_se^2)

# Manifest accuracy: Pearson correlation between empirical and synthetic
# inter-item correlations, for the fine-tuned (r) and pre-trained (pt_r) model.
r <- broom::tidy(cor.test(holdout_llm$empirical_r, holdout_llm$synthetic_r))
pt_r <- broom::tidy(cor.test(pt_holdout_llm$empirical_r, pt_holdout_llm$synthetic_r))

# Single-indicator SEM: empirical_r loads on a latent correlation whose error
# variance is fixed to the known mean squared SE, so the latent covariance
# with synthetic_r is corrected for sampling error in empirical_r.
model <- paste0('
  # Latent variables
  PearsonLatent =~ 1*empirical_r

  # Fixing error variances based on known standard errors
  empirical_r ~~ ',se2,'*empirical_r

  # Relationship between latent variables
  PearsonLatent ~~ synthetic_r
')

fit <- sem(model, data = holdout_llm)
pt_fit <- sem(model, data = pt_holdout_llm)

# Bayesian errors-in-variables model: empirical_r measured with known SE
# (mi term), multi-membership random intercepts for the two items in a pair,
# and a cubic polynomial of synthetic_r for the residual sigma.
# The fit is cached on disk via `file =` (brms.file_refit = "on_change").
m_synth_r_items <- brm(
  bf(empirical_r | mi(empirical_r_se) ~ synthetic_r + (1|mm(variable_1, variable_2)),
     sigma ~ poly(synthetic_r, degree = 3)), data = holdout_llm, 
  file = "ignore/m_synth_r_items_lm")

sd_synth <- sd(m_synth_r_items$data$synthetic_r)

# Per-draw variance decomposition over 200 posterior draws:
# semi_latent_r = sqrt(var(epred) / var(prediction)) is the model-implied
# accuracy for each draw; sigma is the implied residual SD.
newdata <- m_synth_r_items$data %>% select(empirical_r, synthetic_r, empirical_r_se)
epreds <- epred_draws(newdata = newdata, obj = m_synth_r_items, re_formula = NA, ndraws = 200)
preds <- predicted_draws(newdata = newdata, obj = m_synth_r_items, re_formula = NA, ndraws = 200)
epred_preds <- epreds %>% left_join(preds)
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))
rm(epred_preds)  # the joined draws object is large; free it

accuracy_bayes_items <- by_draw %>% mean_hdci(semi_latent_r)

# Accuracy table: manifest, SEM semi-latent, and Bayesian EIV estimates.
bind_rows(
  pt_r %>% 
    mutate(model = "pre-trained", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(pt_fit) %>% 
    filter(lhs == "PearsonLatent", rhs ==  "synthetic_r") %>% 
    mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  r %>% 
    mutate(model = "fine-tuned", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(fit) %>% 
    filter(lhs == "PearsonLatent", rhs ==  "synthetic_r") %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  accuracy_bayes_items %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>% 
    select(model, kind, accuracy = semi_latent_r, conf.low = .lower, conf.high = .upper)
  ) %>% 
  knitr::kable(digits = 2)
```


<details><summary><h4>Prediction error plot according to synthetic estimate</h4></summary>

```{r}
# Print the fitted brms model summary.
m_synth_r_items

# Predictive conditional effects (reused by the scatter plot chunk below),
# posterior summary of sigma, and a plot of sigma vs. synthetic_r.
pred <- conditional_effects(m_synth_r_items, method = "predict")
by_draw %>% mean_hdci(sigma)
plot(conditional_effects(m_synth_r_items, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  ylab("Prediction error (sigma)")
```

</details>



### Scatter plot
```{r}
# Item-level scatter: empirical vs. synthetic inter-item correlations with
# the model-implied prediction band (from `pred`) over the identity line.
plot_items <- ggplot(
  holdout_llm,
  aes(synthetic_r, empirical_r,
      ymin = empirical_r - empirical_r_se,
      ymax = empirical_r + empirical_r_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.1, size = 1) +
  geom_smooth(
    aes(x = synthetic_r, y = estimate__, ymin = lower__, ymax = upper__),
    stat = "identity",
    color = "#a48500",
    fill = "#EDC951",
    data = as.data.frame(pred$synthetic_r)
  ) +
  xlab("Synthetic inter-item correlation") +
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1, 1), ylim = c(-1, 1))
plot_items
```

### Interactive plot
This plot shows only 2000 randomly selected item pairs to conserve memory. A [full interactive plot](2_interactive_item_plot.html) exists, but may react slowly.

```{r}
# Attach the item wordings for both members of each pair.
item_pair_table <- holdout_llm %>% 
   left_join(holdout_mapping_data %>% select(variable_1 = variable,
                                             item_text_1 = item_text)) %>% 
   left_join(holdout_mapping_data %>% select(variable_2 = variable,
                                             item_text_2 = item_text))

# item_pair_table %>% filter(str_length(item_text_1) < 30, str_length(item_text_2) < 30) %>% 
#   left_join(pt_holdout_llm %>% rename(synthetic_r_pt = synthetic_r)) %>% 
#   select(item_text_1, item_text_2, empirical_r, synthetic_r, synthetic_r_pt) %>% View()
# Full table is exported for the separate interactive-plot document.
rio::export(item_pair_table, "ignore/item_pair_table.feather")

# Interactive scatter of a random subset of 2000 pairs (draw depends on the
# RNG state set by set.seed(42) in the setup chunk).
(item_pair_table %>% 
  mutate(synthetic_r = round(synthetic_r, 2),
         empirical_r = round(empirical_r, 2),
         items = str_replace_all(str_c(item_text_1, "\n", item_text_2),
                                  "_+", " ")) %>% 
    sample_n(2000) %>%
ggplot(., aes(synthetic_r, empirical_r, 
              # ymin = empirical_r - empirical_r_se, 
              # ymax = empirical_r + empirical_r_se, 
              label = items)) + 
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-item correlation") + 
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1,1), ylim = c(-1,1))) %>% 
  ggplotly()

# Human-readable export: formatted r ± SE strings, wordings only.
item_pair_table <- item_pair_table %>% 
  mutate(empirical_r = sprintf("%.2f±%.3f", empirical_r,
                                 empirical_r_se),
           synthetic_r = sprintf("%.2f", synthetic_r)) %>% 
  select(item_text_1, item_text_2, empirical_r, synthetic_r)
rio::export(item_pair_table, "item_pair_table.xlsx")
```

<details><summary><h3>Robustness checks</h3></summary>


Is the accuracy lower within/across scales and instruments?

```{r}
# Accuracy by pair type: same instrument / same scale / same subscale.
# Note the 4th if_else() argument: pairs with missing instrument/scale info
# are coded 0 ("different") rather than NA.
holdout_llm %>% 
  mutate(same_instrument = if_else(InstrumentA == InstrumentB, 1, 0,0),
         same_scale = if_else(ScaleA == ScaleB, 1,0,0),
         same_subscale = if_else(same_scale & SubscaleA == SubscaleB, 1,0,0)) %>% 
  group_by(same_scale, same_instrument, same_subscale) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(same_instrument, same_scale, same_subscale, r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  arrange(same_instrument, same_scale, same_subscale) %>% 
  kable()
```

Is the accuracy lower outside classic Big Five?

```{r}
# Accuracy by whether both, either, or neither item comes from a Big Five /
# personality instrument (matched on the instrument name).
holdout_llm %>% 
  mutate(big_five = case_when(
    str_detect(InstrumentA, "(Personality|Big Five)") & str_detect(InstrumentB, "(Personality|Big Five)") ~ "both",
    str_detect(InstrumentA, "(Personality|Big Five)") | str_detect(InstrumentB, "(Personality|Big Five)") ~ "either",
    TRUE ~ "none"
         )) %>% 
  group_by(big_five) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(big_five, r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  arrange(big_five) %>% 
  kable()
```

Is the accuracy lower for items that have low variance?

```{r}
# Per-item SDs in the human data (NAs removed).
item_variances <- holdout_human_data %>% summarise_all(~ sd(., na.rm = TRUE)) %>% 
  pivot_longer(everything(), names_to = "variable", values_to = "item_sd")

# For each item pair, sd_1 * sd_2 bounds the attainable covariance; round it
# up to the next 0.1 to form coarse covariance bands.
by_max_cov <- holdout_llm %>% 
  left_join(item_variances, by = c("variable_1" = "variable")) %>% 
  left_join(item_variances, by = c("variable_2" = "variable"), suffix = c("_1", "_2")) %>% 
  mutate(max_covariance = ceiling((item_sd_1 * item_sd_2)*10)/10)

# Accuracy within each band; bands with 3 or fewer pairs are dropped.
rs_by_max_cov <- by_max_cov %>% 
  group_by(max_covariance) %>% 
  filter(n() > 3) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(max_covariance, r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  arrange(max_covariance)

rs_by_max_cov %>% 
  kable()
rs_by_max_cov %>% ggplot(aes(max_covariance, r, ymin = conf.low, ymax = conf.high)) +
  geom_pointrange()

# Overall accuracy among pairs whose maximum covariance exceeds .7 …
by_max_cov %>% 
  filter(max_covariance > .7) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  kable()

# … and among pairs exceeding 1. Reuses by_max_cov instead of re-deriving the
# same joins and mutate from holdout_llm (the previous code duplicated them).
by_max_cov %>% 
  filter(max_covariance > 1) %>% 
  summarise(broom::tidy(cor.test(synthetic_r, empirical_r)), sd_emp_r = sd(empirical_r), n = n()) %>% 
  select(r = estimate, conf.low, conf.high, n, sd_emp_r) %>% 
  knitr::kable()
```



Is the accuracy lower for the pre-trained model?

```{r}
# Pre-trained model: item-level scatter without a prediction band.
pt_plot_items <- ggplot(
  pt_holdout_llm,
  aes(synthetic_r, empirical_r,
      ymin = empirical_r - empirical_r_se,
      ymax = empirical_r + empirical_r_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.1, size = 1) +
  xlab("Synthetic inter-item correlation") +
  ylab("Empirical inter-item correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1, 1), ylim = c(-1, 1))
pt_plot_items
```


</details>



[Full table of synthetic and empirical item pair correlations](item_pair_table.xlsx)


## Synthetic Reliabilities
```{r}
# Build symmetric item-by-item correlation matrices from the long pair list:
# each pair becomes an undirected edge weighted by r, the weighted adjacency
# matrix is extracted densely, and the diagonal is set to 1.
cors_llm <- holdout_llm %>%
  select(x = variable_1, y = variable_2, r = synthetic_r) %>%
  as.data.frame() |>
  igraph::graph_from_data_frame(directed = FALSE) |>
  igraph::as_adjacency_matrix(attr = "r", sparse = FALSE)
diag(cors_llm) <- 1

# Same construction for the pre-trained model's synthetic correlations …
pt_cors_llm <- pt_holdout_llm %>%
  select(x = variable_1, y = variable_2, r = synthetic_r) %>%
  as.data.frame() |>
  igraph::graph_from_data_frame(directed = FALSE) |>
  igraph::as_adjacency_matrix(attr = "r", sparse = FALSE)
diag(pt_cors_llm) <- 1

# … and for the empirical correlations.
cors_real <- holdout_llm %>%
  select(x = variable_1, y = variable_2, r = empirical_r) %>%
  as.data.frame() |>
  igraph::graph_from_data_frame(directed = FALSE) |>
  igraph::as_adjacency_matrix(attr = "r", sparse = FALSE)
diag(cors_real) <- 1

# Attach items to scales: scales without a subscale join on instrument +
# scale_0 only; scales with a subscale also join on scale_1.
mapping_data <- holdout_mapping_data
items_by_scale <- bind_rows(
  holdout_scales %>% filter(scale_1 == "") %>% left_join(mapping_data %>% select(-scale_1), by = c("instrument", "scale_0")),
  holdout_scales %>% filter(scale_1 != "") %>% left_join(mapping_data, by = c("instrument", "scale_0", "scale_1"))
)
  
# One row per scale: item list, item count, and a lavaan-style loading string.
scales <- items_by_scale %>%
  group_by(keyed, scale) %>%
  summarise(
    items = list(variable),
    number_of_items = n_distinct(variable),
    lvn = paste(first(scale), " =~ ", paste(variable, collapse = " + "))) %>%
  drop_na() %>% 
  ungroup()


# Baseline comparison: 200 "random scales" of Poisson-drawn size (minimum 3),
# sampled from all items. Draws depend on the RNG state from set.seed(42).
random_scales <- list()
for(i in 1:200) {
  n_items <- rpois(1, mean(scales$number_of_items))
  n_items <- if_else(n_items < 3, 3, n_items)
  random_scales[[i]] <- holdout_mapping_data %>%
    sample_n(n_items) %>%
    mutate(scale = paste0("random", i)) %>%
    group_by(scale) %>%
    summarise(
      items = list(variable),
      number_of_items = n_distinct(variable),
      lvn = paste(first(scale), " =~ ", paste(variable, collapse = " + "))) %>%
    drop_na() %>% 
    mutate(keyed = 1)
}

random_scales <- bind_rows(random_scales)
scales <- bind_rows(scales, random_scales)

# Provides find_reverse_items_by_first_item() and reverse_items() used below.
source("global_functions.R")

scales <- scales %>% filter(number_of_items >= 3)

# Row-wise per scale: subset the three correlation matrices to the scale's
# items, reverse-key items (keying inferred from the empirical matrix and the
# `keyed` column), then compute alphas with Feldt confidence intervals from
# psych::alpha on each matrix. empirical_alpha_se approximates the SE as the
# mean half-width step of the psychometric::alpha.CI 95% interval.
scales <- scales %>%
  rowwise() %>%
  mutate(r_real = list(cors_real[items, items]),
         pt_r_llm = list(pt_cors_llm[items, items]),
         r_llm = list(cors_llm[items, items])) %>%
  mutate(reverse_items = list(find_reverse_items_by_first_item(r_real, keyed)),
         r_real_rev = list(reverse_items(r_real, reverse_items)),
         pt_r_llm_rev = list(reverse_items(pt_r_llm, reverse_items)),
         r_llm_rev = list(reverse_items(r_llm, reverse_items))) %>%
  mutate(
    rel_real = list(psych::alpha(r_real_rev, keys = F, n.obs = N)$feldt),
    rel_llm = list(psych::alpha(r_llm_rev, keys = F, n.obs = N)$feldt),
    rel_pt_llm = list(psych::alpha(pt_r_llm_rev, keys = F, n.obs = N)$feldt)) %>%
  mutate(empirical_alpha = rel_real$alpha$raw_alpha,
         synthetic_alpha = rel_llm$alpha$raw_alpha,
         pt_synthetic_alpha = rel_pt_llm$alpha$raw_alpha) %>%
  mutate(
    empirical_alpha_se = mean(diff(unlist(psychometric::alpha.CI(empirical_alpha, k = number_of_items, N = N, level = 0.95))))
  )

# Drop scales with non-positive empirical alpha (uninterpretable reliability).
scales <- scales %>% filter(empirical_alpha > 0)
# qplot(scales$empirical_alpha_se)
# qplot(scales$empirical_alpha, scales$empirical_alpha_se)
# qplot(scales$number_of_items, scales$empirical_alpha_se)
# qplot(scales$empirical_alpha, scales$empirical_alpha_se, color = scales$number_of_items)
```


### Accuracy
```{r}
# Mean squared SE of the empirical alphas; fixes the error variance below.
se2 <- mean(scales$empirical_alpha_se^2)
# Manifest accuracy for reliabilities, fine-tuned and pre-trained.
r <- broom::tidy(cor.test(scales$empirical_alpha, scales$synthetic_alpha))
pt_r <- broom::tidy(cor.test(scales$empirical_alpha, scales$pt_synthetic_alpha))

# Single-indicator SEM analogous to the item-level one: empirical_alpha's
# error variance is fixed to the known mean squared SE.
model <- paste0('
  # Latent variables
  latent_real_rel =~ 1*empirical_alpha

  # Fixing error variances based on known standard errors
  empirical_alpha ~~ ',se2,'*empirical_alpha

  # Relationship between latent variables
  latent_real_rel ~~ synthetic_alpha
')

fit <- sem(model, data = scales)
# Pre-trained fit reuses the same model string by renaming the column.
pt_fit <- sem(model, data = scales %>% 
                select(empirical_alpha, synthetic_alpha = pt_synthetic_alpha))

# Bayesian errors-in-variables regression of empirical on synthetic alpha,
# with a cubic polynomial sigma model; cached on disk via `file =`.
m_lmsynth_rel_scales <- brm(
  bf(empirical_alpha | mi(empirical_alpha_se) ~ synthetic_alpha,
     sigma ~ poly(synthetic_alpha, degree = 3)), data = scales, 
  file = "ignore/m_synth_rel_lm")

# Per-draw variance decomposition (all draws): semi_latent_r is the
# model-implied accuracy per posterior draw.
newdata <- m_lmsynth_rel_scales$data %>% select(empirical_alpha, synthetic_alpha, empirical_alpha_se)
epreds <- epred_draws(newdata = newdata, obj = m_lmsynth_rel_scales, re_formula = NA)
preds <- predicted_draws(newdata = newdata, obj = m_lmsynth_rel_scales, re_formula = NA)
epred_preds <- epreds %>% left_join(preds)
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))

accuracy_bayes_rels <- by_draw %>% mean_hdci(semi_latent_r)

# Accuracy table for reliabilities: manifest, SEM semi-latent, Bayesian EIV.
bind_rows(
  pt_r %>% 
    mutate(model = "pre-trained", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(pt_fit) %>% 
    filter(lhs == "latent_real_rel", rhs ==  "synthetic_alpha") %>% 
    mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  r %>% 
    mutate(model = "fine-tuned", kind = "manifest") %>% 
    select(model, kind, accuracy = estimate, conf.low, conf.high),
  standardizedsolution(fit) %>% 
    filter(lhs == "latent_real_rel", rhs ==  "synthetic_alpha") %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>% 
    select(model, kind, accuracy = est.std, 
           conf.low = ci.lower, conf.high = ci.upper),
  accuracy_bayes_rels %>% 
    mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>% 
    select(model, kind, accuracy = semi_latent_r, conf.low = .lower, conf.high = .upper)
  ) %>% 
  knitr::kable(digits = 2)
```


<details><summary><h4>Prediction error plot according to synthetic estimate</h4></summary>

```{r}
# Print the fitted reliability model.
m_lmsynth_rel_scales

# Predictive conditional effects (reused by the scatter plot below).
pred <- conditional_effects(m_lmsynth_rel_scales, method = "predict")
# sigma is NaN for draws where var(epred) exceeds var(prediction); drop those
# before summarizing.
by_draw %>% 
  filter(!is.nan(sigma)) %>% mean_hdci(sigma)

plot(conditional_effects(m_lmsynth_rel_scales, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  scale_x_continuous(limits = c(0,1))+
  scale_y_continuous(limits = c(0,0.13)) +
  ylab("Prediction error (sigma)")
```

</details>




### Scatter plot
```{r}
# Reliability scatter: real scales in teal, random scales in brown, with the
# model-implied prediction band from `pred` overlaid.
plot_rels <- ggplot(
  scales,
  aes(synthetic_alpha, empirical_alpha,
      color = str_detect(scale, "^random"),
      ymin = empirical_alpha - empirical_alpha_se,
      ymax = empirical_alpha + empirical_alpha_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.6, size = 1) +
  geom_smooth(
    aes(x = synthetic_alpha, y = estimate__, ymin = lower__, ymax = upper__),
    stat = "identity",
    color = "#a48500",
    fill = "#EDC951",
    data = as.data.frame(pred$synthetic_alpha)
  ) +
  scale_color_manual(values = c("#00A0B0", "#6A4A3C"), guide = "none") +
  xlab("Synthetic Cronbach's alpha") +
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  coord_fixed(xlim = c(0, 1), ylim = c(0, 1))
plot_rels
```

### Interactive plot
```{r}
# Interactive reliability plot (real scales only); values are rounded and
# scale names cleaned up for the plotly tooltips.
rel_plot_data <- scales %>%
  filter(!str_detect(scale, "^random")) %>%
  mutate(synthetic_alpha = round(synthetic_alpha, 2),
         empirical_alpha = round(empirical_alpha, 2),
         scale = str_replace_all(scale, "_+", " "))

rel_plot <- ggplot(rel_plot_data,
                   aes(synthetic_alpha, empirical_alpha, label = scale)) +
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.3, size = 1, color = "#00A0B0") +
  xlab("Synthetic Cronbach's alpha") +
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  theme(legend.position = 'none') +
  coord_fixed(xlim = c(NA, 1), ylim = c(NA, 1))

ggplotly(rel_plot)
```

<details><summary><h4>Table</h4></summary>

```{r}
# Filterable table of empirical (± SE) vs. synthetic alphas, real scales only.
rel_table <- scales %>%
  filter(!str_detect(scale, "^random")) %>%
  mutate(
    empirical_alpha = sprintf("%.2f±%.3f", empirical_alpha, empirical_alpha_se),
    synthetic_alpha = sprintf("%.2f", synthetic_alpha),
    scale = str_replace_all(scale, "_+", " ")
  ) %>%
  select(scale, empirical_alpha, synthetic_alpha, number_of_items)

DT::datatable(rel_table, rownames = FALSE, filter = "top")
```

</details>



<details><summary><h3>Robustness checks</h3></summary>

```{r}
# Accuracy separately for real vs. randomly formed scales (the grouping
# column is the bare logical expression, so it appears under that name).
scales %>% 
  group_by(str_detect(scale, "^random")) %>% 
  summarise(broom::tidy(cor.test(synthetic_alpha, empirical_alpha)), sd_alpha = sd(empirical_alpha), n = n()) %>% 
  knitr::kable()
```


Although the number of items alone can of course predict Cronbach's alpha, the synthetic alphas explain much more variance in empirical alphas.

```{r}
# How well does item count alone track empirical alpha …
scales %>% 
  ungroup() %>% 
  summarise(broom::tidy(cor.test(number_of_items, empirical_alpha)), sd_alpha = sd(empirical_alpha), n = n()) %>% 
  knitr::kable()


# … and how much does synthetic alpha add on top of item count?
summary(lm(empirical_alpha ~ number_of_items, scales))
summary(lm(empirical_alpha ~ number_of_items + synthetic_alpha, scales))
```



Is the accuracy lower for the pre-trained model?

```{r}
# Pre-trained model: reliability scatter without a prediction band.
pt_plot_rels <- ggplot(
  scales,
  aes(pt_synthetic_alpha, empirical_alpha,
      color = str_detect(scale, "^random"),
      ymin = empirical_alpha - empirical_alpha_se,
      ymax = empirical_alpha + empirical_alpha_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(alpha = 0.6, size = 1) +
  scale_color_manual(values = c("#00A0B0", "#6A4A3C"), guide = "none") +
  xlab("Synthetic Cronbach's alpha") +
  ylab("Empirical Cronbach's alpha") +
  theme_bw() +
  coord_fixed(xlim = c(0, 1), ylim = c(0, 1))
pt_plot_rels
```


</details>




## Synthetic Scale Correlations
```{r}
# Scale-level correlations (fine-tuned and pre-trained models).
manifest_scores = arrow::read_feather(file = file.path(data_path, glue("ignore.{model_name}.raw.osf-bainbridge-2021-s2-0.scale_correlations.feather")))
pt_manifest_scores = arrow::read_feather(file = file.path(data_path, glue("ignore.{pretrained_model_name}.raw.osf-bainbridge-2021-s2-0.scale_correlations.feather")))

# Number of distinct scales appearing on the "a" side.
n_distinct(manifest_scores$scale_a)

# Attach per-scale info (e.g. number_of_items) for both scales of each pair;
# duplicated columns get the default .x/.y suffixes used later.
manifest_scores <- manifest_scores %>%
 left_join(scales, by = c("scale_a" = "scale")) %>%
 left_join(scales, by = c("scale_b" = "scale"))
```

### Accuracy
```{r}
# Manifest accuracy: Pearson correlation between empirical and synthetic
# inter-scale correlations, for the fine-tuned (r) and pre-trained (pt_r) model.
r <- broom::tidy(cor.test(manifest_scores$empirical_r, manifest_scores$synthetic_r))
pt_r <- broom::tidy(cor.test(pt_manifest_scores$empirical_r, pt_manifest_scores$synthetic_r))

# Semi-latent accuracy via SEM: fix the error variance of empirical_r at the
# mean squared standard error so the latent correlation is disattenuated for
# sampling error in the empirical correlations.
se2 <- mean(manifest_scores$empirical_r_se^2)
model <- paste0('
    # Latent variables
    PearsonLatent =~ 1*empirical_r

    # Fixing error variances based on known standard errors
    empirical_r ~~ ',se2,'*empirical_r

    # Relationship between latent variables
    PearsonLatent ~~ synthetic_r
  ')

# NOTE(review): se2 comes from the fine-tuned data but the same model string
# (with that fixed error variance) is also fitted to pt_manifest_scores —
# confirm the pre-trained fit should not use its own empirical_r_se.
fit <- sem(model, data = manifest_scores)
pt_fit <- sem(model, data = pt_manifest_scores)


# Bayesian errors-in-variables model: empirical_r is treated as measured with
# known SE (mi()), predicted by synthetic_r, with a multi-membership random
# intercept for the two scales in each pair. The residual SD (sigma) is
# modeled as a cubic polynomial of synthetic_r. Results are cached to file.
m_lmsynth_r_scales <- brm(
  bf(empirical_r | mi(empirical_r_se) ~ synthetic_r + (1|mm(scale_a, scale_b)),
     sigma ~ poly(synthetic_r, degree = 3)), data = manifest_scores, 
  file = "ignore/m_synth_r_scales_lm8")

sd_synth <- sd(m_lmsynth_r_scales$data$synthetic_r)


# Posterior draws of expected values (epred) and posterior predictions (pred),
# population-level only (re_formula = NA drops the random effects).
newdata <- m_lmsynth_r_scales$data %>% select(empirical_r, synthetic_r, empirical_r_se)
epreds <- epred_draws(newdata = newdata, obj = m_lmsynth_r_scales, re_formula = NA)
preds <- predicted_draws(newdata = newdata, obj = m_lmsynth_r_scales, re_formula = NA)
epred_preds <- epreds %>% left_join(preds)
# Per posterior draw: explained variance (.epred), total predictive variance
# (.prediction), residual SD (sigma), and a semi-latent accuracy as the square
# root of the explained share. Note the columns are deliberately overwritten
# in sequence inside summarise(); later expressions see the new values.
by_draw <- epred_preds %>% group_by(.draw) %>% 
  summarise(.epred = var(.epred),
            .prediction = var(.prediction),
            sigma = sqrt(.prediction - .epred),
            semi_latent_r = sqrt(.epred/.prediction))

# Posterior mean and highest-density interval of the semi-latent accuracy.
accuracy_bayes_scales <- by_draw %>% mean_hdci(semi_latent_r)

# Accuracy summary table: manifest and (semi-)latent estimates for the
# pre-trained and fine-tuned models, assembled row-group by row-group.
pt_manifest_row <- pt_r %>%
  mutate(model = "pre-trained", kind = "manifest") %>%
  select(model, kind, accuracy = estimate, conf.low, conf.high)

pt_sem_row <- standardizedsolution(pt_fit) %>%
  filter(lhs == "PearsonLatent", rhs == "synthetic_r") %>%
  mutate(model = "pre-trained", kind = "semi-latent (SEM)") %>%
  select(model, kind, accuracy = est.std,
         conf.low = ci.lower, conf.high = ci.upper)

ft_manifest_row <- r %>%
  mutate(model = "fine-tuned", kind = "manifest") %>%
  select(model, kind, accuracy = estimate, conf.low, conf.high)

ft_sem_row <- standardizedsolution(fit) %>%
  filter(lhs == "PearsonLatent", rhs == "synthetic_r") %>%
  mutate(model = "fine-tuned", kind = "semi-latent (SEM)") %>%
  select(model, kind, accuracy = est.std,
         conf.low = ci.lower, conf.high = ci.upper)

ft_bayes_row <- accuracy_bayes_scales %>%
  mutate(model = "fine-tuned", kind = "semi-latent (Bayesian EIV)") %>%
  select(model, kind, accuracy = semi_latent_r,
         conf.low = .lower, conf.high = .upper)

bind_rows(pt_manifest_row, pt_sem_row,
          ft_manifest_row, ft_sem_row, ft_bayes_row) %>%
  knitr::kable(digits = 2)
```


<details><summary><h4>Prediction error plot according to synthetic estimate</h4></summary>

```{r}
# Print the model summary and the posterior mean/HDI of the residual SD.
m_lmsynth_r_scales
by_draw %>% mean_hdci(sigma)

# Conditional effects with observation noise (method = "predict") are stored
# in `pred` for reuse in the scatter plot below; the sigma effect visualizes
# how prediction error varies with the synthetic correlation.
pred <- conditional_effects(m_lmsynth_r_scales, method = "predict")
plot(conditional_effects(m_lmsynth_r_scales, dpar = "sigma"), plot = F)[[1]] + 
  theme_bw() + 
  ylab("Prediction error (sigma)")
```


</details>



### Scatter plot
```{r}
# Scatter of synthetic vs. empirical inter-scale correlations, with the
# Bayesian model's prediction band (from `pred`, previous chunk) overlaid.
plot_scales <- ggplot(
  manifest_scores,
  aes(synthetic_r, empirical_r,
      ymin = empirical_r - empirical_r_se,
      ymax = empirical_r + empirical_r_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  geom_smooth(
    aes(x = synthetic_r, y = estimate__, ymin = lower__, ymax = upper__),
    stat = "identity",
    color = "#a48500",
    fill = "#EDC951",
    data = as.data.frame(pred$synthetic_r)
  ) +
  xlab("Synthetic inter-scale correlation") +
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1, 1), ylim = c(-1, 1))
plot_scales
```

### Interactive plot
```{r}
# Interactive version: hover labels show the pair of scales (underscores
# collapsed to spaces) with correlations rounded to two decimals.
interactive_scores <- manifest_scores %>%
  mutate(
    synthetic_r = round(synthetic_r, 2),
    empirical_r = round(empirical_r, 2),
    scales = str_replace_all(str_c(scale_a, "\n", scale_b), "_+", " ")
  )

interactive_plot <- ggplot(interactive_scores,
                           aes(synthetic_r, empirical_r, label = scales)) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-scale correlation") +
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1, 1), ylim = c(-1, 1))

ggplotly(interactive_plot)
```

<details><summary>Table</summary>

```{r}
# Searchable table of all scale pairs: empirical r (± SE) and synthetic r,
# with readable scale names.
manifest_scores %>%
  mutate(
    empirical_r = sprintf("%.2f±%.3f", empirical_r, empirical_r_se),
    synthetic_r = sprintf("%.2f", synthetic_r),
    scale_a = str_replace_all(scale_a, "_+", " "),
    scale_b = str_replace_all(scale_b, "_+", " ")
  ) %>%
  select(scale_a, scale_b, empirical_r, synthetic_r) %>%
  DT::datatable(rownames = FALSE, filter = "top")
```

</details>




<details><summary><h3>Robustness checks</h3></summary>

How does the number of items across the two scales relate to accuracy?

```{r}
# Manifest accuracy as a function of the total number of items across the
# two scales in each pair.
by_item_number <- manifest_scores %>%
  mutate(items = number_of_items.x + number_of_items.y) %>%
  group_by(items) %>%
  summarise(broom::tidy(cor.test(empirical_r, synthetic_r)), pairwise_n = n())

by_item_number %>%
  ggplot(aes(items, estimate, ymin = conf.low, ymax = conf.high)) +
  geom_pointrange() +
  scale_y_continuous("Manifest accuracy (with 95% confidence interval)") +
  xlab("Number of items summed across scales")

# Precision-weighted trend: each point is weighted by the inverse width of
# its confidence interval. `weights` is evaluated inside `data`, so the
# data frame does not need to be re-referenced with `$` (the original
# `by_item_number$conf.high - ...` form breaks if the data argument changes).
lm(estimate ~ items, by_item_number,
   weights = 1 / (conf.high - conf.low))

# Accuracy restricted to pairs where both scales have at least 10 items.
manifest_scores %>%
  filter(number_of_items.x >= 10, number_of_items.y >= 10) %>%
  summarise(cor = cor(empirical_r, synthetic_r), n = n())
```


Is the accuracy lower for the pre-trained model?

```{r}
# Pre-trained model's scatter for inter-scale correlations (counterpart to
# plot_scales for the fine-tuned model).
pt_plot_scales <- ggplot(
  pt_manifest_scores,
  aes(synthetic_r, empirical_r,
      ymin = empirical_r - empirical_r_se,
      ymax = empirical_r + empirical_r_se)
) +
  geom_abline(linetype = "dashed") +
  geom_point(color = "#00A0B0", alpha = 0.3, size = 1) +
  xlab("Synthetic inter-scale correlation") +
  ylab("Empirical inter-scale correlation") +
  theme_bw() +
  coord_fixed(xlim = c(-1, 1), ylim = c(-1, 1))
pt_plot_scales
```


</details>




## Combined plot

```{r fig.width = 8.3, fig.height = 6}
library(patchwork)
# Annotated versions of the item-level scatter plots (pt_plot_items /
# plot_items are created earlier in the file). Segment endpoints are
# hand-tuned coordinates pointing at specific item-pair correlations.
pt_plot_items2 <- pt_plot_items +
    annotate("text", size = 2.5, x = 0.5, y = -0.8, vjust = 1, hjust = 1, label = "r(I fear for the worst,\nI never worry about anything)", color = "#00A0B0") +
  annotate("segment", x = 0, y = -0.78, xend = 0.2761906, yend = -0.3459711, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = 0.5, hjust = 1, label = "r(I get angry easily,\nI lose my temper)", color = "#00A0B0") + 
  annotate("segment", x = -.1, y = 0.5, xend = 0.6935711, yend = 0.7140546, color = "#00A0B0", alpha = 0.7)
  

# Fine-tuned item plot additionally displays the Bayesian semi-latent
# accuracy estimate with its interval in the top-left corner.
plot_items2 <- plot_items +
  annotate("text", size = 3, x = -1, y = 0.98, vjust = 0, hjust = 0, label = with(accuracy_bayes_items, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = 0.5, y = -0.8, vjust = 1, hjust = 1, label = "r(I fear for the worst,\nI never worry about anything)", color = "#00A0B0") +
  annotate("segment", x = 0, y = -0.78, xend = -0.1104686, yend = -0.3459711, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = 0.5, hjust = 1, label = "r(I get angry easily,\nI lose my temper)", color = "#00A0B0") + 
  annotate("segment", x = -.1, y = 0.5, xend = 0.8031979, yend = 0.7140546, color = "#00A0B0", alpha = 0.7)
  # annotate("text", size = 2.5, x = -0.5, y = -0.8, hjust = 0, label = "r(I love life,\nI avoid crowds)", color = "#00A0B0") +
  # annotate("segment", x = -.3, y = -0.7, xend = -0.23, yend = -0.28, color = "#00A0B0", alpha = 0.7) +
  # annotate("text", size = 2.5, x = -.3, y = 0.5, hjust = 1, label = "r(I work hard,\nI am diligent)", color = "#00A0B0") +
  # annotate("segment", x = -.3, y = .5, xend = .52, yend = .58, color = "#00A0B0", alpha = 0.7)


# Annotated reliability (alpha) plots for the pre-trained model.
pt_plot_rels2 <- pt_plot_rels + 
  annotate("text", size = 2.5, x = 0.61, y = 0.25, hjust = 0, label = "IPIP Extraversion", color = "#00A0B0") +
  annotate("segment", x = 0.7, y = 0.27, xend = 0.9, yend = 0.90, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -0.02, y = 0.95, hjust = 0, label = "LOT Optimism", color = "#00A0B0") +
  # NOTE(review): xend = -1.477226 lies far outside this plot's
  # xlim = c(0, 1); it looks like a pasted correlation value rather than a
  # coordinate — confirm the intended segment endpoint.
  annotate("segment", x = 0.11, y = 0.93, xend = -1.477226, yend = 0.71, color = "#00A0B0", alpha = 0.7)

# Fine-tuned reliability plot: adds the Bayesian accuracy estimate and points
# out the cluster of randomly formed scales.
plot_rels2 <- plot_rels + 
  annotate("text", size = 3, x = 0, y = .985, vjust = 0, hjust = 0, label = with(accuracy_bayes_rels, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = 0.4, y = 0.1, hjust = 0, label = "randomly formed scales", color = "#6A4A3C") +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.39, yend = 0.4864297, color = "#6A4A3C", alpha = 0.7) +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.285, yend = 0.2279919, color = "#6A4A3C", alpha = 0.7) +
  annotate("segment", x = 0.395, y = 0.1, xend = 0.2, yend = 0.075, color = "#6A4A3C", alpha = 0.7) +
  annotate("text", size = 2.5, x = 0.61, y = 0.4, hjust = 0, label = "IPIP Extraversion", color = "#00A0B0") +
  annotate("segment", x = 0.75, y = 0.42, xend = 0.87, yend = 0.90, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -0.02, y = 0.86, hjust = 0, label = "LOT Optimism", color = "#00A0B0") +
  annotate("segment", x = 0.11, y = 0.83, xend = 0.15, yend = 0.71, color = "#00A0B0", alpha = 0.7)

# Annotated scale-correlation plots (pre-trained, then fine-tuned with the
# Bayesian accuracy estimate).
pt_plot_scales2 <- pt_plot_scales +
  annotate("text", size = 2.5, x = -0.2, y = 0.8, hjust = 1, label = "r(BFI Neuroticism,\nIPIP Neuroticism)", color = "#00A0B0") +
  annotate("segment", x = -.2, y = 0.8, xend = 0.22, yend = .84, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.1, y = -0.9, hjust = 0, label = "r(BFI Depression facet,\nLOT Optimism)", color = "#00A0B0") +
  annotate("segment", x = -.1, y = -.9, xend = -0.17, yend = -.63, color = "#00A0B0", alpha = 0.7)


plot_scales2 <- plot_scales +
  annotate("text", size = 3, x = -1, y = 0.98, vjust = 0, hjust = 0, label = with(accuracy_bayes_scales, { sprintf("accuracy = %.2f [%.2f;%.2f]", semi_latent_r, .lower, .upper) })) +
  annotate("text", size = 2.5, x = -0.1, y = 0.5, hjust = 1, label = "r(BFI Neuroticism,\nIPIP Neuroticism)", color = "#00A0B0") +
  annotate("segment", x = -.1, y = 0.5, xend = .84, yend = .84, color = "#00A0B0", alpha = 0.7) +
  annotate("text", size = 2.5, x = -.15, y = -0.7, hjust = 0, label = "r(BFI Depression facet,\nLOT Optimism)", color = "#00A0B0") +
  annotate("segment", x = -.15, y = -.7, xend = -.34, yend = -.63, color = "#00A0B0", alpha = 0.7)


# Assemble the combined 2x3 figure with patchwork:
# top row = pre-trained model, bottom row = fine-tuned model.
# NOTE(review): in patchwork, adding ggtitle() to a composed row applies it
# to the most recently added plot (the rels plot here) — confirm the titles
# land on the intended panels.
(pt_plot_items2+
    pt_plot_rels2 + ggtitle("") +
    pt_plot_scales2) /


(plot_items2 +
    plot_rels2 + ggtitle("SurveyBot 3000")  +
    plot_scales2) +
  plot_annotation(
  title = 'Pre-trained model before domain adaptation and fine-tuning'
)

# Save the last displayed figure; cairo_pdf presumably chosen so non-ASCII
# glyphs (e.g. "±") embed correctly — confirm if fonts ever render wrong.
ggsave("ignore/Figure_pilot.pdf", width = 8.3, height = 6, device = grDevices::cairo_pdf)
ggsave("ignore/Figure_pilot.png", width = 8.3, height = 6)



# ggsave("ignore/Figure_pilot.svg", width = 8.3, height = 5.5, device = svglite::svglite)
```

