install.packages(c("ggm", "gmodels", "vcd", "Hmisc", "pastecs", "psych", "doBy"))
mt <- mtcars[c("mpg", "hp", "wt", "am")]
head(mt)
## mpg hp wt am
## Mazda RX4 21.0 110 2.620 1
## Mazda RX4 Wag 21.0 110 2.875 1
## Datsun 710 22.8 93 2.320 1
## Hornet 4 Drive 21.4 110 3.215 0
## Hornet Sportabout 18.7 175 3.440 0
## Valiant 18.1 105 3.460 0
mt <- mtcars[c("mpg", "hp", "wt", "am")]
summary(mt)
## mpg hp wt am
## Min. :10.4 Min. : 52.0 Min. :1.51 Min. :0.000
## 1st Qu.:15.4 1st Qu.: 96.5 1st Qu.:2.58 1st Qu.:0.000
## Median :19.2 Median :123.0 Median :3.33 Median :0.000
## Mean :20.1 Mean :146.7 Mean :3.22 Mean :0.406
## 3rd Qu.:22.8 3rd Qu.:180.0 3rd Qu.:3.61 3rd Qu.:1.000
## Max. :33.9 Max. :335.0 Max. :5.42 Max. :1.000
Any descriptive statistics you choose can be computed with the apply() or sapply() function. The general form is sapply(x, FUN, options), where x is the data frame (or matrix), FUN is an arbitrary function, and options are any arguments passed on to FUN. Typical functions to plug in are mean(), sd(), var(), min(), max(), and median().
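As a quick illustration (a minimal sketch; the choice of columns is only an example), sapply() can apply any of these built-in functions to the selected variables:
# column means and standard deviations for mpg, hp, and wt
sapply(mtcars[c("mpg", "hp", "wt")], mean)
sapply(mtcars[c("mpg", "hp", "wt")], sd)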
mystats <- function(x, na.omit=FALSE){
if (na.omit)
x <- x[!is.na(x)]
m <- mean(x)
n <- length(x)
s <- sd(x)
skew <- sum((x-m)^3/s^3)/n
kurt <- sum((x-m)^4/s^4)/n - 3
return(c(n=n, mean=m, stdev=s, skew=skew, kurtosis=kurt))
}
myvars <- c("mpg", "hp", "wt")
sapply(mtcars[myvars], mystats)
## mpg hp wt
## n 32.0000 32.0000 32.00000
## mean 20.0906 146.6875 3.21725
## stdev 6.0269 68.5629 0.97846
## skew 0.6107 0.7260 0.42315
## kurtosis -0.3728 -0.1356 -0.02271
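Because sapply() forwards additional arguments to FUN, the na.omit option defined in mystats() can be passed straight through (a minimal sketch; mtcars has no missing values, so the result here is identical):
sapply(mtcars[myvars], mystats, na.omit=TRUE)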
library(Hmisc)
myvars <- c("mpg", "hp", "wt")
describe(mtcars[myvars])
## mtcars[myvars]
##
## 3 Variables 32 Observations
## ---------------------------------------------------------------------------
## mpg
## n missing unique Info Mean .05 .10 .25 .50
## 32 0 25 1 20.09 12.00 14.34 15.43 19.20
## .75 .90 .95
## 22.80 30.09 31.30
##
## lowest : 10.4 13.3 14.3 14.7 15.0, highest: 26.0 27.3 30.4 32.4 33.9
## ---------------------------------------------------------------------------
## hp
## n missing unique Info Mean .05 .10 .25 .50
## 32 0 22 1 146.7 63.65 66.00 96.50 123.00
## .75 .90 .95
## 180.00 243.50 253.55
##
## lowest : 52 62 65 66 91, highest: 215 230 245 264 335
## ---------------------------------------------------------------------------
## wt
## n missing unique Info Mean .05 .10 .25 .50
## 32 0 29 1 3.217 1.736 1.956 2.581 3.325
## .75 .90 .95
## 3.610 4.048 5.293
##
## lowest : 1.513 1.615 1.835 1.935 2.140
## highest: 3.845 4.070 5.250 5.345 5.424
## ---------------------------------------------------------------------------
library(pastecs)
myvars <- c("mpg", "hp", "wt")
stat.desc(mtcars[myvars])
## mpg hp wt
## nbr.val 32.000 32.0000 32.0000
## nbr.null 0.000 0.0000 0.0000
## nbr.na 0.000 0.0000 0.0000
## min 10.400 52.0000 1.5130
## max 33.900 335.0000 5.4240
## range 23.500 283.0000 3.9110
## sum 642.900 4694.0000 102.9520
## median 19.200 123.0000 3.3250
## mean 20.091 146.6875 3.2172
## SE.mean 1.065 12.1203 0.1730
## CI.mean.0.95 2.173 24.7196 0.3528
## var 36.324 4700.8669 0.9574
## std.dev 6.027 68.5629 0.9785
## coef.var 0.300 0.4674 0.3041
library(psych)
myvars <- c("mpg", "hp", "wt")
describe(mtcars[myvars])
## vars n mean sd median trimmed mad min max range skew
## mpg 1 32 20.09 6.03 19.20 19.70 5.41 10.40 33.90 23.50 0.61
## hp 2 32 146.69 68.56 123.00 141.19 77.10 52.00 335.00 283.00 0.73
## wt 3 32 3.22 0.98 3.33 3.15 0.77 1.51 5.42 3.91 0.42
## kurtosis se
## mpg -0.37 1.07
## hp -0.14 12.12
## wt -0.02 0.17
myvars <- c("mpg", "hp", "wt")
aggregate(mtcars[myvars], by=list(am=mtcars$am), mean)
## am mpg hp wt
## 1 0 17.15 160.3 3.769
## 2 1 24.39 126.8 2.411
aggregate(mtcars[myvars], by=list(am=mtcars$am), sd)
## am mpg hp wt
## 1 0 3.834 53.91 0.7774
## 2 1 6.167 84.06 0.6170
Here, dstats() calls the mystats() function defined earlier:
dstats <- function(x)sapply(x, mystats)
myvars <- c("mpg", "hp", "wt")
by(mtcars[myvars], mtcars$am, dstats)
## mtcars$am: 0
## mpg hp wt
## n 19.00000 19.00000 19.0000
## mean 17.14737 160.26316 3.7689
## stdev 3.83397 53.90820 0.7774
## skew 0.01395 -0.01423 0.9759
## kurtosis -0.80318 -1.20970 0.1416
## --------------------------------------------------------
## mtcars$am: 1
## mpg hp wt
## n 13.00000 13.0000 13.0000
## mean 24.39231 126.8462 2.4110
## stdev 6.16650 84.0623 0.6170
## skew 0.05256 1.3599 0.2103
## kurtosis -1.45535 0.5635 -1.1737
library(doBy)
summaryBy(mpg+hp+wt~am, data=mtcars, FUN=mystats)
## am mpg.n mpg.mean mpg.stdev mpg.skew mpg.kurtosis hp.n hp.mean hp.stdev
## 1 0 19 17.15 3.834 0.01395 -0.8032 19 160.3 53.91
## 2 1 13 24.39 6.167 0.05256 -1.4554 13 126.8 84.06
## hp.skew hp.kurtosis wt.n wt.mean wt.stdev wt.skew wt.kurtosis
## 1 -0.01423 -1.2097 19 3.769 0.7774 0.9759 0.1416
## 2 1.35989 0.5635 13 2.411 0.6170 0.2103 -1.1737
library(psych)
myvars <- c("mpg", "hp", "wt")
describeBy(mtcars[myvars], list(am=mtcars$am))
## $`0`
## vars n mean sd median trimmed mad min max range skew
## mpg 1 19 17.15 3.83 17.30 17.12 3.11 10.40 24.40 14.00 0.01
## hp 2 19 160.26 53.91 175.00 161.06 77.10 62.00 245.00 183.00 -0.01
## wt 3 19 3.77 0.78 3.52 3.75 0.45 2.46 5.42 2.96 0.98
## kurtosis se
## mpg -0.80 0.88
## hp -1.21 12.37
## wt 0.14 0.18
##
## $`1`
## vars n mean sd median trimmed mad min max range skew
## mpg 1 13 24.39 6.17 22.80 24.38 6.67 15.00 33.90 18.90 0.05
## hp 2 13 126.85 84.06 109.00 114.73 63.75 52.00 335.00 283.00 1.36
## wt 3 13 2.41 0.62 2.32 2.39 0.68 1.51 3.57 2.06 0.21
## kurtosis se
## mpg -1.46 1.71
## hp 0.56 23.31
## wt -1.17 0.17
##
## attr(,"call")
## by.data.frame(data = x, INDICES = group, FUN = describe, type = type)
The describeBy() function does not allow an arbitrary function to be specified. Summary statistics by group can also be computed with the reshape package:
library(reshape)
dstats <- function(x)(c(n=length(x), mean=mean(x), sd=sd(x)))
dfm <- melt(mtcars, measure.vars=c("mpg", "hp", "wt"),
id.vars=c("am", "cyl"))
cast(dfm, am + cyl + variable ~ ., dstats)
## am cyl variable n mean sd
## 1 0 4 mpg 3 22.900 1.4526
## 2 0 4 hp 3 84.667 19.6554
## 3 0 4 wt 3 2.935 0.4075
## 4 0 6 mpg 4 19.125 1.6317
## 5 0 6 hp 4 115.250 9.1788
## 6 0 6 wt 4 3.389 0.1162
## 7 0 8 mpg 12 15.050 2.7744
## 8 0 8 hp 12 194.167 33.3598
## 9 0 8 wt 12 4.104 0.7683
## 10 1 4 mpg 8 28.075 4.4839
## 11 1 4 hp 8 81.875 22.6554
## 12 1 4 wt 8 2.042 0.4093
## 13 1 6 mpg 3 20.567 0.7506
## 14 1 6 hp 3 131.667 37.5278
## 15 1 6 wt 3 2.755 0.1282
## 16 1 8 mpg 2 15.400 0.5657
## 17 1 8 hp 2 299.500 50.2046
## 18 1 8 wt 2 3.370 0.2828
# frequency tables
library(vcd)
head(Arthritis)
## ID Treatment Sex Age Improved
## 1 57 Treated Male 27 Some
## 2 46 Treated Male 29 None
## 3 77 Treated Male 30 None
## 4 17 Treated Male 32 Marked
## 5 36 Treated Male 46 Marked
## 6 23 Treated Male 58 Marked
# one way table
mytable <- with(Arthritis, table(Improved))
mytable # frequencies
## Improved
## None Some Marked
## 42 14 28
prop.table(mytable) # proportions
## Improved
## None Some Marked
## 0.5000 0.1667 0.3333
prop.table(mytable)*100 # percentages
## Improved
## None Some Marked
## 50.00 16.67 33.33
The xtabs() function can also create a contingency table using formula-style input:
mytable <- xtabs(~ Treatment+Improved, data=Arthritis)
mytable # frequencies
## Improved
## Treatment None Some Marked
## Placebo 29 7 7
## Treated 13 7 21
margin.table(mytable,1) #row sums
## Treatment
## Placebo Treated
## 43 41
margin.table(mytable, 2) # column sums
## Improved
## None Some Marked
## 42 14 28
prop.table(mytable) # cell proportions
## Improved
## Treatment None Some Marked
## Placebo 0.34524 0.08333 0.08333
## Treated 0.15476 0.08333 0.25000
prop.table(mytable, 1) # row proportions
## Improved
## Treatment None Some Marked
## Placebo 0.6744 0.1628 0.1628
## Treated 0.3171 0.1707 0.5122
prop.table(mytable, 2) # column proportions
## Improved
## Treatment None Some Marked
## Placebo 0.6905 0.5000 0.2500
## Treated 0.3095 0.5000 0.7500
addmargins(mytable) # add row and column sums to table
## Improved
## Treatment None Some Marked Sum
## Placebo 29 7 7 43
## Treated 13 7 21 41
## Sum 42 14 28 84
# more complex tables
addmargins(prop.table(mytable))
## Improved
## Treatment None Some Marked Sum
## Placebo 0.34524 0.08333 0.08333 0.51190
## Treated 0.15476 0.08333 0.25000 0.48810
## Sum 0.50000 0.16667 0.33333 1.00000
addmargins(prop.table(mytable, 1), 2)
## Improved
## Treatment None Some Marked Sum
## Placebo 0.6744 0.1628 0.1628 1.0000
## Treated 0.3171 0.1707 0.5122 1.0000
addmargins(prop.table(mytable, 2), 1)
## Improved
## Treatment None Some Marked
## Placebo 0.6905 0.5000 0.2500
## Treated 0.3095 0.5000 0.7500
## Sum 1.0000 1.0000 1.0000
CrossTable() can be used to generate a two-way contingency table:
library(gmodels)
CrossTable(Arthritis$Treatment, Arthritis$Improved)
##
##
## Cell Contents
## |-------------------------|
## | N |
## | Chi-square contribution |
## | N / Row Total |
## | N / Col Total |
## | N / Table Total |
## |-------------------------|
##
##
## Total Observations in Table: 84
##
##
## | Arthritis$Improved
## Arthritis$Treatment | None | Some | Marked | Row Total |
## --------------------|-----------|-----------|-----------|-----------|
## Placebo | 29 | 7 | 7 | 43 |
## | 2.616 | 0.004 | 3.752 | |
## | 0.674 | 0.163 | 0.163 | 0.512 |
## | 0.690 | 0.500 | 0.250 | |
## | 0.345 | 0.083 | 0.083 | |
## --------------------|-----------|-----------|-----------|-----------|
## Treated | 13 | 7 | 21 | 41 |
## | 2.744 | 0.004 | 3.935 | |
## | 0.317 | 0.171 | 0.512 | 0.488 |
## | 0.310 | 0.500 | 0.750 | |
## | 0.155 | 0.083 | 0.250 | |
## --------------------|-----------|-----------|-----------|-----------|
## Column Total | 42 | 14 | 28 | 84 |
## | 0.500 | 0.167 | 0.333 | |
## --------------------|-----------|-----------|-----------|-----------|
##
##
Both table() and xtabs() can generate multidimensional contingency tables based on three or more categorical variables.
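For example, the same three-way table can be built with table() (a brief sketch; output omitted, the xtabs() version below is equivalent):
with(Arthritis, table(Treatment, Sex, Improved))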
mytable <- xtabs(~ Treatment+Sex+Improved, data=Arthritis)
mytable
## , , Improved = None
##
## Sex
## Treatment Female Male
## Placebo 19 10
## Treated 6 7
##
## , , Improved = Some
##
## Sex
## Treatment Female Male
## Placebo 7 0
## Treated 5 2
##
## , , Improved = Marked
##
## Sex
## Treatment Female Male
## Placebo 6 1
## Treated 16 5
ftable(mytable)
## Improved None Some Marked
## Treatment Sex
## Placebo Female 19 7 6
## Male 10 0 1
## Treated Female 6 5 16
## Male 7 2 5
margin.table(mytable, 1)
## Treatment
## Placebo Treated
## 43 41
margin.table(mytable, 2)
## Sex
## Female Male
## 59 25
margin.table(mytable, c(1,3))
## Improved
## Treatment None Some Marked
## Placebo 29 7 7
## Treated 13 7 21
ftable(prop.table(mytable, c(1,2)))
## Improved None Some Marked
## Treatment Sex
## Placebo Female 0.59375 0.21875 0.18750
## Male 0.90909 0.00000 0.09091
## Treated Female 0.22222 0.18519 0.59259
## Male 0.50000 0.14286 0.35714
ftable(addmargins(prop.table(mytable, c(1, 2)), 3))
## Improved None Some Marked Sum
## Treatment Sex
## Placebo Female 0.59375 0.21875 0.18750 1.00000
## Male 0.90909 0.00000 0.09091 1.00000
## Treated Female 0.22222 0.18519 0.59259 1.00000
## Male 0.50000 0.14286 0.35714 1.00000
# Listing 7.12 - Chi-square test of independence
library(vcd)
mytable <- xtabs(~Treatment+Improved, data=Arthritis)
chisq.test(mytable)
##
## Pearson's Chi-squared test
##
## data: mytable
## X-squared = 13, df = 2, p-value = 0.001
mytable <- xtabs(~Improved+Sex, data=Arthritis)
chisq.test(mytable)
##
## Pearson's Chi-squared test
##
## data: mytable
## X-squared = 4.8, df = 2, p-value = 0.09
The null hypothesis of Fisher's exact test is that the rows and columns of a contingency table with fixed margins are independent:
mytable <- xtabs(~Treatment+Improved, data=Arthritis)
fisher.test(mytable)
##
## Fisher's Exact Test for Count Data
##
## data: mytable
## p-value = 0.001
## alternative hypothesis: two.sided
The null hypothesis of the Cochran-Mantel-Haenszel test is that the two nominal variables are conditionally independent within each stratum of the third variable:
mytable <- xtabs(~Treatment+Improved+Sex, data=Arthritis)
mantelhaen.test(mytable)
##
## Cochran-Mantel-Haenszel test
##
## data: mytable
## Cochran-Mantel-Haenszel M^2 = 15, df = 2, p-value = 7e-04
library(vcd)
mytable <- xtabs(~Treatment+Improved, data=Arthritis)
assocstats(mytable)
## X^2 df P(> X^2)
## Likelihood Ratio 13.530 2 0.0011536
## Pearson 13.055 2 0.0014626
##
## Phi-Coefficient : NA
## Contingency Coeff.: 0.367
## Cramer's V : 0.394
The state.x77 dataset provides 1977 data on population, income, illiteracy rate, life expectancy, murder rate, and high-school graduation rate for the 50 US states.
states<- state.x77[,1:6]
cov(states)
## Population Income Illiteracy Life Exp Murder HS Grad
## Population 19931683.8 571229.8 292.8680 -407.8425 5663.524 -3551.510
## Income 571229.8 377573.3 -163.7020 280.6632 -521.894 3076.769
## Illiteracy 292.9 -163.7 0.3715 -0.4815 1.582 -3.235
## Life Exp -407.8 280.7 -0.4815 1.8020 -3.869 6.313
## Murder 5663.5 -521.9 1.5818 -3.8695 13.627 -14.550
## HS Grad -3551.5 3076.8 -3.2355 6.3127 -14.550 65.238
cor(states)
## Population Income Illiteracy Life Exp Murder HS Grad
## Population 1.00000 0.2082 0.1076 -0.06805 0.3436 -0.09849
## Income 0.20823 1.0000 -0.4371 0.34026 -0.2301 0.61993
## Illiteracy 0.10762 -0.4371 1.0000 -0.58848 0.7030 -0.65719
## Life Exp -0.06805 0.3403 -0.5885 1.00000 -0.7808 0.58222
## Murder 0.34364 -0.2301 0.7030 -0.78085 1.0000 -0.48797
## HS Grad -0.09849 0.6199 -0.6572 0.58222 -0.4880 1.00000
cor(states, method="spearman")
## Population Income Illiteracy Life Exp Murder HS Grad
## Population 1.0000 0.1246 0.3130 -0.1040 0.3457 -0.3834
## Income 0.1246 1.0000 -0.3146 0.3241 -0.2175 0.5105
## Illiteracy 0.3130 -0.3146 1.0000 -0.5554 0.6724 -0.6545
## Life Exp -0.1040 0.3241 -0.5554 1.0000 -0.7802 0.5239
## Murder 0.3457 -0.2175 0.6724 -0.7802 1.0000 -0.4367
## HS Grad -0.3834 0.5105 -0.6545 0.5239 -0.4367 1.0000
x <- states[,c("Population", "Income", "Illiteracy", "HS Grad")]
y <- states[,c("Life Exp", "Murder")]
cor(x,y)
## Life Exp Murder
## Population -0.06805 0.3436
## Income 0.34026 -0.2301
## Illiteracy -0.58848 0.7030
## HS Grad 0.58222 -0.4880
library(ggm)
## Loading required package: igraph
##
## Attaching package: 'igraph'
## The following objects are masked from 'package:stats':
##
## decompose, spectrum
## The following object is masked from 'package:base':
##
## union
##
## Attaching package: 'ggm'
## The following object is masked from 'package:igraph':
##
## pa
## The following object is masked from 'package:Hmisc':
##
## rcorr
# partial correlation of population and murder rate, controlling
# for income, illiteracy rate, and HS graduation rate
pcor(c(1,5,2,3,6), cov(states))
## [1] 0.3463
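The significance of this partial correlation can then be tested with the ggm package's pcor.test() function; a minimal sketch, assuming its pcor.test(r, q, n) interface (r is the partial correlation, q the number of conditioning variables, n the sample size):
pc <- pcor(c(1,5,2,3,6), cov(states))
pcor.test(pc, 3, 50)  # 3 conditioning variables, 50 states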
# Testing a correlation coefficient for significance
cor.test(states[,3], states[,5])
##
## Pearson's product-moment correlation
##
## data: states[, 3] and states[, 5]
## t = 6.8, df = 48, p-value = 1e-08
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.5279 0.8207
## sample estimates:
## cor
## 0.703
library(psych)
corr.test(states, use="complete")
## Call:corr.test(x = states, use = "complete")
## Correlation matrix
## Population Income Illiteracy Life Exp Murder HS Grad
## Population 1.00 0.21 0.11 -0.07 0.34 -0.10
## Income 0.21 1.00 -0.44 0.34 -0.23 0.62
## Illiteracy 0.11 -0.44 1.00 -0.59 0.70 -0.66
## Life Exp -0.07 0.34 -0.59 1.00 -0.78 0.58
## Murder 0.34 -0.23 0.70 -0.78 1.00 -0.49
## HS Grad -0.10 0.62 -0.66 0.58 -0.49 1.00
## Sample Size
## [1] 50
## Probability values (Entries above the diagonal are adjusted for multiple tests.)
## Population Income Illiteracy Life Exp Murder HS Grad
## Population 0.00 0.59 1.00 1.0 0.10 1
## Income 0.15 0.00 0.01 0.1 0.54 0
## Illiteracy 0.46 0.00 0.00 0.0 0.00 0
## Life Exp 0.64 0.02 0.00 0.0 0.00 0
## Murder 0.01 0.11 0.00 0.0 0.00 0
## HS Grad 0.50 0.00 0.00 0.0 0.00 0
##
## To see confidence intervals of the correlations, print with the short=FALSE option
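As the output itself suggests, the confidence intervals for these correlations can be displayed by printing with short=FALSE:
print(corr.test(states, use="complete"), short=FALSE)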
library(MASS)
head(UScrime)
## M So Ed Po1 Po2 LF M.F Pop NW U1 U2 GDP Ineq Prob Time y
## 1 151 1 91 58 56 510 950 33 301 108 41 394 261 0.0846 26.2 791
## 2 143 0 113 103 95 583 1012 13 102 96 36 557 194 0.0296 25.3 1635
## 3 142 1 89 45 44 533 969 18 219 94 33 318 250 0.0834 24.3 578
## 4 136 0 121 149 141 577 994 157 80 102 39 673 167 0.0158 29.9 1969
## 5 141 0 121 109 101 591 985 18 30 91 20 578 174 0.0414 21.3 1234
## 6 121 0 110 118 115 547 964 25 44 84 29 689 126 0.0342 21.0 682
The UScrime dataset in the MASS package contains information on the effect of punishment regimes on crime rates in 47 US states in 1960. The outcome variables of interest are Prob (the probability of imprisonment), U1 (the unemployment rate of urban males aged 14 to 24), and U2 (the unemployment rate of urban males aged 35 to 39). The categorical variable So (an indicator of whether a state is in the South) serves as the grouping variable.
t.test(Prob ~ So, data=UScrime)
##
## Welch Two Sample t-test
##
## data: Prob by So
## t = -3.9, df = 25, p-value = 7e-04
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.03853 -0.01187
## sample estimates:
## mean in group 0 mean in group 1
## 0.03851 0.06371
# dependent t test
sapply(UScrime[c("U1","U2")], function(x)(c(mean=mean(x),sd=sd(x))))
## U1 U2
## mean 95.47 33.979
## sd 18.03 8.445
with(UScrime, t.test(U1, U2, paired=TRUE))
##
## Paired t-test
##
## data: U1 and U2
## t = 32, df = 46, p-value <2e-16
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 57.67 65.31
## sample estimates:
## mean of the differences
## 61.49
with(UScrime, by(Prob, So, median))
## So: 0
## [1] 0.0382
## --------------------------------------------------------
## So: 1
## [1] 0.05555
wilcox.test(Prob ~ So, data=UScrime)
##
## Wilcoxon rank sum test
##
## data: Prob by So
## W = 81, p-value = 8e-05
## alternative hypothesis: true location shift is not equal to 0
sapply(UScrime[c("U1", "U2")], median)
## U1 U2
## 92 34
with(UScrime, wilcox.test(U1, U2, paired=TRUE))
##
## Wilcoxon signed rank test with continuity correction
##
## data: U1 and U2
## V = 1100, p-value = 2e-09
## alternative hypothesis: true location shift is not equal to 0
# Kruskal-Wallis test
states <- data.frame(state.region, state.x77)
kruskal.test(Illiteracy ~ state.region, data=states)
##
## Kruskal-Wallis rank sum test
##
## data: Illiteracy by state.region
## Kruskal-Wallis chi-squared = 23, df = 3, p-value = 5e-05
source("http://www.statmethods.net/RiA/wmc.txt")
states <- data.frame(state.region, state.x77)
wmc(Illiteracy ~ state.region, data=states, method="holm")
## Descriptive Statistics
##
## West North Central Northeast South
## n 13.0000 12.0000 9.0000 16.000
## median 0.6000 0.7000 1.1000 1.750
## mad 0.1483 0.1483 0.2965 0.593
##
## Multiple Comparisons (Wilcoxon Rank Sum Tests)
## Probability Adjustment = holm
##
## Group.1 Group.2 W p
## 1 West North Central 88.0 8.666e-01
## 2 West Northeast 46.5 8.666e-01
## 3 West South 39.0 1.788e-02 *
## 4 North Central Northeast 20.5 5.360e-02 .
## 5 North Central South 2.0 8.052e-05 ***
## 6 Northeast South 18.0 1.188e-02 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1