Agricultural Machinery Trajectory Analysis

Comparing Hive, MySQL, and Spark SQL

2017-10-25  至极L

The comparison is based on experiments that measure the query speed of the three systems.
Python code for generating the test data

import random

if __name__ == "__main__":
    def getOneTraj():
        # Build one tab-separated trajectory record with random field values
        vme_id = 'S90110000' + str(random.randint(2, 9))
        gps_time = '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11'
        work_state = str(random.randint(0, 1))
        ultrasonic_station = str(random.randint(0, 1))
        limit_switch_state = str(random.randint(0, 1))
        work_deep = str(random.randint(0, 1000))
        longtitude = str(random.uniform(60, 90))
        latitude = str(random.uniform(30, 60))
        elevation = str(random.uniform(0, 1160))
        speed = str(random.uniform(0, 60))
        course_direction = str(random.randint(0, 599))
        fields = [vme_id, gps_time, work_state, ultrasonic_station, limit_switch_state,
                  work_deep, longtitude, latitude, elevation, speed, course_direction]
        return '\t'.join(fields) + '\n'

    # Write 10 files (test10.csv ... test19.csv), each holding 4000 * 10000 = 40 million records
    count = 10
    for i in [4000] * 10:
        fileName = 'test' + str(count) + '.csv'
        count = count + 1
        f = open(fileName, 'w')
        print(i)
        for k in range(0, i):
            # Buffer 10000 records per write to cut down on I/O calls
            str1 = ''
            for j in range(0, 10000):
                str1 = str1 + getOneTraj()
            f.write(str1)
        f.close()

SQL statements used for the performance benchmark

select count(*) from trajectory;
select count(*) from trajectory group by vme_id;
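
Since the point of the post is raw query speed, it helps to time these statements the same way on each system. Below is a minimal sketch of a timing wrapper for the MySQL case; it assumes the dbtac database, the trajectory table and the credentials set up in section 1 below.

import time
import pymysql

queries = [
    "select count(*) from trajectory",
    "select count(*) from trajectory group by vme_id",
]

conn = pymysql.connect(host="localhost", user="root", password="123", database="dbtac")
cursor = conn.cursor()
for sql in queries:
    start = time.perf_counter()
    cursor.execute(sql)
    cursor.fetchall()  # read the full result so the timing covers it
    print(sql, round(time.perf_counter() - start, 2), "s")
cursor.close()
conn.close()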

1. MySQL query speed test

# Create the database and table
CREATE DATABASE dbtac;
USE dbtac;

CREATE TABLE trajectory (
    vme_id VARCHAR(100), gps_time VARCHAR(100), work_state INT,
    ultrasonic_station INT, limit_switch_state INT, work_deep INT,
    longtitude DOUBLE, latitude DOUBLE, elevation INT, speed INT,
    course_direction INT
);

# Insert a single test row
insert into trajectory values('sdd','21',1,1,1,1,1,1,1,1,1);

MySQL can import CSV files directly, but on Ubuntu this runs into a number of permission problems; after several failed attempts, the following approach was used instead.
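For reference, this is what the direct import would look like if issued through pymysql rather than the mysql command line. It is only a sketch: it assumes local_infile is enabled on the server, and the file path is the one used for the local loads later in this post.

import pymysql

# Direct CSV import via LOAD DATA LOCAL INFILE (assumes local_infile is allowed)
conn = pymysql.connect(host="localhost", user="root", password="123",
                       database="dbtac", local_infile=True)
cursor = conn.cursor()
cursor.execute(
    "LOAD DATA LOCAL INFILE '/win/test/test10.csv' "
    "INTO TABLE trajectory FIELDS TERMINATED BY '\\t'"
)
conn.commit()
conn.close()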
Since that route was unreliable here, the data was instead inserted into MySQL from Python, through pymysql:

import pymysql
import random
import datetime

print('Connecting to the MySQL server...')
conn = pymysql.connect(host="localhost", user="root", password="123", database="dbtac")
print('Connected!')
cursor = conn.cursor()
for i in range(400):  # 400 batches in total, i.e. 400 million rows
    values = []
    now = datetime.datetime.now()
    for j in range(1000000):  # build 1,000,000 rows per batch; this batch size tested best
        vme_id = 'S90110000' + str(random.randint(2, 9))
        gps_time = '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11'
        work_state = random.randint(0, 1)
        ultrasonic_station = random.randint(0, 1)
        limit_switch_state = random.randint(0, 1)
        work_deep = random.randint(0, 900)
        longtitude = random.uniform(60, 90)
        latitude = random.uniform(30, 60)
        elevation = random.uniform(0, 1160)
        speed = random.uniform(0, 60)
        course_direction = random.randint(0, 599)
        value = (vme_id, gps_time, work_state, ultrasonic_station, limit_switch_state,
                 work_deep, longtitude, latitude, elevation, speed, course_direction)
        values.append(value)
    end = datetime.datetime.now()
    print('build batch:', end - now)
    now = datetime.datetime.now()
    cursor.executemany("insert into trajectory values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", values)
    conn.commit()
    end = datetime.datetime.now()
    print('insert batch:', end - now)
cursor.close()
conn.close()

Note: inserting 400 million rows (about 40 GB) into MySQL this way is very slow; it takes more than 12 hours.
Workaround: run several Python terminals at the same time, each executing the program above over a different part of the 400 iterations (i.e. adjust the range of i in each copy), as sketched below.
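
A minimal sketch of that split, using a hypothetical insert_part.py that takes the start and end batch index on the command line (run for example "python insert_part.py 0 100" and "python insert_part.py 100 200" in separate terminals); the batch-building code is unchanged from the script above.

import sys
import random
import pymysql

# Each process handles batches [start, end) so several copies can run in parallel
start, end = int(sys.argv[1]), int(sys.argv[2])

conn = pymysql.connect(host="localhost", user="root", password="123", database="dbtac")
cursor = conn.cursor()
sql = "insert into trajectory values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"

for i in range(start, end):
    values = []
    for j in range(1000000):  # same 1,000,000-row batch as above
        values.append(('S90110000' + str(random.randint(2, 9)),
                       '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11',
                       random.randint(0, 1), random.randint(0, 1), random.randint(0, 1),
                       random.randint(0, 900), random.uniform(60, 90), random.uniform(30, 60),
                       random.uniform(0, 1160), random.uniform(0, 60), random.randint(0, 599)))
    cursor.executemany(sql, values)
    conn.commit()

cursor.close()
conn.close()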

2. Spark SQL

# Start the Hadoop cluster
start-all.sh
# Start the Spark cluster
start-master.sh
start-slaves.sh

# Start spark-sql against the standalone cluster
spark-sql --master spark://master:7077 --driver-cores 8 --driver-java-options "-Dspark.driver.port=4050" --conf spark.sql.warehouse.dir=hdfs://master:9000/user/hive/warehouse


# If that fails, spark-sql can be started in local mode instead
spark-sql

Reference: Running Spark on a Cluster

Hadoop cluster web UI: http://localhost:50070/
Spark cluster web UI: http://master:8080/

After entering spark-sql, run:

CREATE DATABASE dbtac;
use dbtac;

DROP TABLE IF EXISTS trajectory;
# or empty the table instead of dropping it
truncate table trajectory;

CREATE TABLE dbtac.trajectory(vme_id STRING, gps_time STRING, work_state INT, ultrasonic_station INT, limit_switch_state INT, work_deep INT, longtitude DOUBLE, latitude DOUBLE, elevation INT, speed INT, course_direction INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;

# Load data that is already on HDFS
load data inpath 'hdfs://Master:9000/dbtac/test/test10.csv' into table trajectory;

# Load data from the local filesystem
LOAD DATA LOCAL INPATH '/win/test/test10.csv' INTO TABLE trajectory;

Check query performance

select count(*) from trajectory where work_deep>40;
select count(*) from trajectory where work_deep>40 and speed >20;
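
The same two queries can also be run and timed from PySpark instead of the spark-sql shell. A minimal sketch, assuming Spark was built with Hive support and can see the dbtac.trajectory table created above:

import time
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("trajectory-benchmark")
         .enableHiveSupport()
         .getOrCreate())

for sql in ["select count(*) from dbtac.trajectory where work_deep > 40",
            "select count(*) from dbtac.trajectory where work_deep > 40 and speed > 20"]:
    start = time.perf_counter()
    spark.sql(sql).collect()  # collect() forces the job to actually run
    print(sql, round(time.perf_counter() - start, 2), "s")

spark.stop()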

Delete a fixed number of records (note that DELETE ... LIMIT is MySQL syntax; Spark SQL cannot delete rows from a plain text table, so this statement belongs to the MySQL side of the comparison)

delete from trajectory limit 100000;

3. Hive

Upload the files to HDFS

hdfs dfs -mkdir /dbtac
hdfs dfs -put /usr/local/dbtac/tac  /dbtac
hdfs dfs -ls  /dbtac/tac

hdfs dfs -rm -r /dbtac  # remove the dbtac directory

Start Hive

cd /usr/local/hive
./bin/hive

Commands inside the Hive CLI

CREATE DATABASE dbtac;
use dbtac;  # switch to the dbtac database

DROP TABLE IF EXISTS trajectory;
# Create the external table
CREATE EXTERNAL TABLE dbtac.trajectory(vme_id STRING, gps_time STRING, work_state INT, ultrasonic_station INT, limit_switch_state INT, work_deep INT, longtitude DOUBLE, latitude DOUBLE, elevation INT, speed INT, course_direction INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;

# Load data from HDFS
load data inpath 'hdfs://Master:9000/user/hive/warehouse/dbtac.db/trajectory/test.csv' into table trajectory;
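
To time the same benchmark queries against Hive from Python, so that all three systems are measured in the same way, one option is the PyHive package. A sketch under the assumption that HiveServer2 has been started (hive --service hiveserver2) and listens on its default port 10000:

import time
from pyhive import hive

conn = hive.Connection(host="master", port=10000, database="dbtac")
cursor = conn.cursor()
for sql in ["select count(*) from trajectory",
            "select count(*) from trajectory group by vme_id"]:
    start = time.perf_counter()
    cursor.execute(sql)
    cursor.fetchall()  # read the full result so the timing covers it
    print(sql, round(time.perf_counter() - start, 2), "s")
conn.close()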

Other notes

How to mount a Windows shared directory on Ubuntu so the Windows data can be accessed:

sudo apt-get install cifs-utils
sudo mkdir /win
sudo mount -t cifs -o username=xiaojing,password=xxx //192.168.17.122/mydata /win