# Twitter data collection: search for eye-dialect spellings of English words
# containing AO (open o) and their Scots variants, then save the hits to CSV.
#
# One-time setup (run manually once, not on every execution; restart the
# R session after installing):
#   install.packages(c("devtools", "rjson", "bit64", "httr"))
#   devtools::install_github("twitteR", username = "geoffjentry")

library(twitteR)
library(plyr)

# SECURITY NOTE(review): credentials are hard-coded in the script. These keys
# should be revoked/rotated and read from environment variables instead,
# e.g. api_key <- Sys.getenv("TWITTER_API_KEY").
api_key <- "c9d9pLSiwpnZa5ghQqc3VSKO0"
api_secret <- "dDplH1upTc6zXrXF8GOoHRG5bJwVPb5tWYPkakpvHq138Wa6qN"
access_token <- "101609102-f0hXebcRZNEwZTGqn6uBoK8TdOlvi2b8q4mrOcOH"
access_token_secret <- "1pHo0cvIcqlG5PhzULhnC9JFzEhzdfdDopsQF8wUnQL37"
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)

# Fetch up to `n` English-language tweets for each search term and return one
# combined data frame. Builds a list of per-term data frames and binds them
# once at the end (no rbind-in-a-loop growth).
collect_tweets <- function(terms, n = 100) {
  per_term <- lapply(terms, function(term) {
    hits <- searchTwitter(term, n = n, lang = "en")
    do.call("rbind", lapply(hits, as.data.frame))
  })
  do.call("rbind", per_term)
}

# Variant spellings (from the CMU pronouncing dictionary) of the most
# frequent English words that contain AO (open o):
#   for / on / or / all / your / also / want / because
ao_words <- c("awn", "awr", "awll", "yawr", "awlso", "wawnt", "becawse")
write.csv(collect_tweets(ao_words), "TwitterData.csv")

# Same collection for Scots variant spellings (do / to / who / you).
scots_words <- c("dae", "tae", "whae", "yae")
write.csv(collect_tweets(scots_words), "scotsTwitterData.csv")