
RNA-seq analysis: from fastq to differentially expressed genes

2018-12-06 · 高邮在逃咸鸭蛋

RNA-seq data analysis is a fairly simple, foundational kind of analysis. The rough pipeline is: clean up the raw fastq data coming off the sequencer (Trimmomatic), align the reads to the human genome (HISAT2), count the reads falling on each gene (featureCounts), then switch to R for differential expression analysis (DESeq2) to find differentially expressed genes, and finally run some enrichment analysis (clusterProfiler).
A few days ago I happened to process a batch of 60 RNA-seq samples, so I recorded every step here.
The first part runs under Linux and produces a counts file for each sample.

#!/bin/sh

fq_path=/work/data/lch_analysis
out_path="/work/analysis/lch_analysis"
trim_path="/opt/Trimmomatic-0.35"
hisat2_path="/opt/"
ht2_genome="/work/database/human/HISAT2/UCSC_hg19_Human"
gtf="/work/database/human/GTF/gencode.v27lift37.annotation.gtf"

cd $fq_path || exit 1
for file in *_good_1.fq.gz
        do
        pre_name=${file%_good_1.fq.gz}
        mkdir -p $out_path/$pre_name/log
        fq1="${pre_name}_good_1.fq.gz"
        fq2="${pre_name}_good_2.fq.gz"

        # Step 0
        # Trim reads using Trimmomatic
        java -Xmx4g -jar \
        $trim_path/trimmomatic-0.35.jar \
        PE \
        -threads 4 \
        -phred33 \
        $fq1 \
        $fq2 \
        -baseout $out_path/$pre_name/${pre_name}_trimmed.fq.gz \
        ILLUMINACLIP:$trim_path/adapters/TruSeq2-PE.fa:2:30:10:6:true \
        SLIDINGWINDOW:4:15 \
        MINLEN:51 \
                2> $out_path/$pre_name/log/${pre_name}_Trimmomatic.log

        # Step 1
        # Map reads to hg19 reference
        hisat2 \
        -p 4 -q \
        -x $ht2_genome/genome \
        --fr \
        --rg-id ${pre_name} \
        --rg SM:${pre_name} --rg LB:${pre_name} --rg PL:ILLUMINA \
        -1 $out_path/$pre_name/${pre_name}_trimmed_1P.fq.gz \
        -2 $out_path/$pre_name/${pre_name}_trimmed_2P.fq.gz | samtools sort -@ 4 - \
        -T $out_path/$pre_name/log/rna_temp \
        -l 1 \
        -o $out_path/$pre_name/${pre_name}_sorted.bam \
        2> $out_path/$pre_name/log/${pre_name}_samtools_sort.log

        # Step 2
        # Count number of reads on genes
        featureCounts \
                -T 4 \
                -p \
                -t exon \
                -g gene_id \
                -a $gtf \
                -o $out_path/$pre_name/${pre_name}_featureCounts.txt \
                $out_path/$pre_name/${pre_name}_sorted.bam \
                2> $out_path/$pre_name/log/${pre_name}_featureCounts.log

        done

The steps above are pretty straightforward, so I won't go into more detail. Next comes the processing in R. Because there are 60 files in total, they have to be read in and processed in batch, which is where the lapply and sapply functions come in.
lapply is called as:
lapply(X, FUN, ...)
lapply returns a list of the same length as X, in which each element is the result of applying the function FUN to the corresponding element of X. X should be a list (each of whose elements is a vector); objects of other types are automatically coerced to a list by R via as.list().
sapply is a special case of lapply that fixes the values of a few extra arguments. It is called as:
sapply(X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE)
sapply(X, FUN, simplify = FALSE, USE.NAMES = FALSE) returns the same thing as lapply(). With simplify = TRUE, sapply does not return a list but tries to simplify the result into a vector or matrix; with simplify = FALSE it still returns a list.
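As a quick toy illustration (not part of this pipeline), the only practical difference that matters here is whether the result comes back as a list or gets simplified:

# Toy example: the same function applied with lapply() and sapply()
x <- list(a = 1:3, b = 4:6)
lapply(x, sum)                    # named list: $a is 6, $b is 15
sapply(x, sum)                    # simplify = TRUE: a named numeric vector c(a = 6, b = 15)
sapply(x, sum, simplify = FALSE)  # behaves like lapply() and returns a list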

source("http://bioconductor.org/biocLite.R")
biocLite("DESeq2")
library(DESeq2)
library(org.Hs.eg.db)
library(pheatmap)
setwd('/work/work/rna-seq-lhc/deseq2_results')
## read in all the files under the same directory
path <- "/work/work/rna-seq-lhc/featurecounts_results_cut" ## directory holding the per-sample counts files
fileNames <- dir(path)  ## file names under that path
filePath <- sapply(fileNames, function(x){
  paste(path,x,sep='/')})   ## build the full path of each file
data <- lapply(filePath, function(x){
  read.table(x,sep = '\t', header=F,stringsAsFactors = F)})  ## read the data; the result is a list

data2 <- lapply(data, function(x){data.frame(ID=gsub('\\..*', '', x$V1), Count=x$V2)})## strip the Ensembl version suffix from the gene IDs
data3 <- do.call(cbind, data2)## combine the 60 samples
data4 <- data3
data4$ID <- data4$`Human_J429-ZX01-L01_featureCounts.txt.ID`  ## use one sample's ID column as the shared gene ID
data4 <- data4[,c('ID', grep('Count$', colnames(data4), value = T))]
data5=aggregate(data4[,-1],by=list(data4$ID),sum)## sum the counts of duplicated gene IDs
rownames(data5)=data5$Group.1
data5=data5[,-1]
colnames(data5)=unlist(strsplit(colnames(data5),'_f'))[seq(1,ncol(data5)*2,by=2)]## keep only the sample name part of each column name
data5[1:6,1:6]

table=read.csv('/work/work/rna-seq-lhc/分组信息.csv')## sample grouping table (sample name and condition)
table1=table[table$condition=='before'|table$condition=='after',]## keep the patients sampled before and after treatment
count1=data5[,table1$name]
countmatrix1<-as.matrix(count1)
dds1 <- DESeqDataSetFromMatrix(countmatrix1, colData=table1, design= ~ condition)
dds1 <- dds1[ rowSums(counts(dds1)) > 1, ]
dds1 <- DESeq(dds1)## run the full DESeq2 pipeline (normalization, dispersion estimation, testing)
res1 <- results(dds1)
res1$genesymbol <- mapIds(org.Hs.eg.db,keys = rownames(res1),column = "SYMBOL",keytype ="ENSEMBL",multiVals = 'first')## add a gene symbol column
res1 <- res1[order(res1$padj),]
res1 <- res1[,c(7,1,2,3,4,5,6)]
res1 <- merge(as.data.frame(res1),as.data.frame(counts(dds1,normalized=TRUE)),by="row.names",sort=FALSE)
write.table(res1[1:1000,],"result_before_after_1000.csv", sep = ",", row.names = T)
differgenes1<-subset(res1,padj<0.05&(log2FoldChange>1|log2FoldChange< -1))

log2.norm.counts1 <- as.data.frame(log2(counts(dds1,normalized=T)+1))[differgenes1$Row.names,]
log2.norm.counts1[log2.norm.counts1>3]=3
log2.norm.counts1 <- t(scale(t(log2.norm.counts1)))## row-wise z-score
annotation_col1 <- data.frame(condition=table1$condition)
rownames(annotation_col1) <- table1$name
pheatmap(log2.norm.counts1, cluster_rows=TRUE, show_rownames=FALSE,
         cluster_cols=T, annotation_col = annotation_col1)

The res1 written out above holds the top 1,000 genes ranked by adjusted p-value, followed by the DESeq2-normalized counts for each sample. If needed, you can go on to make t-SNE plots, volcano plots, and enrichment plots. I didn't do that here, but I'll paste my earlier related scripts below.
Volcano plot

# volcano plot
library(ggplot2)
library(openxlsx)
library(ggrepel)
library(dplyr)
data=read.xlsx('~/桌面/初步分析2-egfl7.xlsx',sheet = 1)
#data=read.xlsx('~/桌面/初步分析-mir126.xlsx',sheet = 1)

data$threshold <- as.factor(ifelse(data$pvalue < 0.05 & abs(data$log2FC) >= 1,ifelse(data$log2FC > 1 ,'Up','Down'),'Not'))
p=ggplot(data=data,aes(x=log2FC, y =-log10(pvalue),colour=threshold,fill=threshold)) +
  scale_color_manual(values=c("blue", "grey","red"))+
  geom_point(alpha=0.8, size=1.2)+
  xlim(c(-4, 4)) +
  theme_bw(base_size = 12, base_family = "Times") +
  geom_vline(xintercept=c(-1,1),lty=4,col="grey",lwd=0.6)+
  geom_hline(yintercept = -log10(0.05),lty=4,col="grey",lwd=0.6)+
  theme(legend.position="right",
        panel.grid=element_blank(),
        legend.title = element_blank(),
        legend.text= element_text(face="bold", color="black",family = "Times", size=8),
        plot.title = element_text(hjust = 0.5),
        axis.text.x = element_text(face="bold", color="black", size=12),
        axis.text.y = element_text(face="bold",  color="black", size=12),
        axis.title.x = element_text(face="bold", color="black", size=12),
        axis.title.y = element_text(face="bold",color="black", size=12))+
  labs(x="log2 (fold change)",y="-log10 (p-value)",title="Volcano picture of DEG")

p+geom_text_repel(data=filter(data, pvalue< 4.32E-09), aes(label=genesymbol),show.legend=F)## label the genes with a p-value below 4.32E-09

t-SNE plot

##tsne
library(Rtsne)  # Rtsne is needed here and was not loaded above
# degs_counts: a genes x samples matrix of normalized counts for the selected DEGs
# annotation: a data frame with one row per sample and a Type column ("health" vs "before")
tsne_matrix <- t(degs_counts)
tsne_result <- Rtsne(tsne_matrix,perplexity = 3,pca = F,theta=0.5)
tsne_plot <- data.frame(Cluster.1 = tsne_result$Y[,1], Cluster.2 = tsne_result$Y[,2], 
                        Type = factor(annotation$Type,levels = c("health","before")))
ggplot(tsne_plot) + 
  geom_point(aes(x=Cluster.1, y=Cluster.2, color=Type)) +
  theme_bw(base_size = 12, base_family = "") +
  theme(legend.justification=c(0,0),legend.position = c(0.75,0.75),
        legend.title = element_blank(),
        legend.text = element_text(size = 10)) + 
  ggtitle("t-SNE Clustering (top 100 DEGs)") + 
  theme(plot.title = element_text(hjust = 0.5))
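The t-SNE script above expects two objects that are not created in this post: degs_counts (a genes × samples matrix of normalized counts for the selected DEGs) and annotation (a per-sample data frame with a Type column). As a rough sketch only, assuming you reuse the DESeq2 objects from earlier, they could be built like this (the original comparison was healthy vs. pre-treatment, so adjust the factor levels in the plotting code to match your actual grouping):

# Sketch only: derive the t-SNE inputs from the DESeq2 objects created earlier.
# The before/after design from table1 is reused here purely for illustration.
top_ids <- res1$Row.names[1:100]                            # top 100 genes by adjusted p-value
degs_counts <- log2(counts(dds1, normalized = TRUE) + 1)[top_ids, ]
annotation <- data.frame(Type = table1$condition, row.names = table1$name)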

GO and KEGG enrichment

source("https://bioconductor.org/biocLite.R")
biocLite("org.Hs.eg.db")
setwd("/work/R语言/")
library(org.Hs.eg.db)
#install clusterProfiler
source("https://bioconductor.org/biocLite.R")
biocLite("org.Mm.eg.db")
library(org.Mm.eg.db)
biocLite("clusterProfiler")
library(clusterProfiler)
#GO
# 'results' here is a data frame from an earlier analysis with a GeneCluster column;
# the genes assigned to cluster 1 are tested for GO enrichment
ego<-enrichGO(OrgDb="org.Mm.eg.db",
             #gene = row.names(differgenes),
             gene = rownames(results[grep("1",results$GeneCluster),]),
             pvalueCutoff = 0.01,
             keytype = "ENSEMBL",
             readable=TRUE)
write.csv(as.data.frame(ego),"G-enrich.csv",row.names =F)

#KEGG
a=read.csv("/work/new_output/filter_immune/at2_subtype/at2.csv",header = T,sep = ",",stringsAsFactors = F)
# map Ensembl IDs to Entrez IDs (assuming the Ensembl IDs sit in the first column of a)
x<-AnnotationDbi::select(org.Mm.eg.db,
          keys = a[,1],
          columns = "ENTREZID",
          keytype = "ENSEMBL"
          )
kegg<-x[,"ENTREZID"]
ekk <- enrichKEGG(gene=kegg,
                  keyType = "kegg",
                  organism = 'mmu',
                  pvalueCutoff = 0.05,
                  pAdjustMethod = "BH", 
                  qvalueCutoff = 0.1)
DOSE::dotplot(ekk, font.size=10)
write.csv(as.data.frame(ekk),"KEGG-enrich.csv",row.names =F)  # output file name filled in here; the original used a variable defined elsewhere