public HdfsPreviewResp hdfsPreview()

in manager/manager/src/main/java/org/apache/doris/stack/service/construct/DataImportService.java [241:308]


    /**
     * Builds an HDFS file-preview request and asks Doris for a sample of the file.
     * <p>
     * Steps: validate the request fields, resolve the user's current cluster (admin
     * check included), look up the target table/database, discover a broker via
     * {@code show broker}, assemble the preview request and forward it to Doris.
     *
     * @param info    HDFS connection info; host, fileUrl, format and columnSeparator are required
     * @param user    requesting user; must be an admin of their current cluster
     * @param tableId id of the target table (must exist in tableRepository)
     * @return preview response containing the file sample plus the connect/file info used
     * @throws RequestFieldNullException if any required field of {@code info} is missing
     * @throws UnknownException          if no broker is installed on the cluster or the table is missing
     * @throws Exception                 propagated from cluster/database lookups and the Doris call
     */
    public HdfsPreviewResp hdfsPreview(HdfsConnectReq info, CoreUserEntity user, int tableId) throws Exception {
        log.debug("User {} get hdfs file preview.", user.getId());
        if (StringUtils.isEmpty(info.getHost()) || StringUtils.isEmpty(info.getFileUrl()) || info.getFormat() == null
                || info.getColumnSeparator() == null) {
            log.error("Hdfs import host,file url, file format or file column separator is null");
            throw new RequestFieldNullException();
        }

        ClusterInfoEntity clusterInfo = clusterUserComponent.getUserCurrentClusterAndCheckAdmin(user);
        // Fail with a descriptive error instead of a bare NoSuchElementException from Optional.get().
        ManagerTableEntity tableEntity = tableRepository.findById(tableId)
                .orElseThrow(() -> new UnknownException("Table " + tableId + " not found."));
        ManagerDatabaseEntity databaseEntity =
                databuildComponent.checkClusterDatabase(tableEntity.getDbId(), clusterInfo.getId());

        // get broker name
        String sql = "show broker";
        NativeQueryResp queryResp;
        String brokerName = null;
        try {
            queryResp = queryClient.executeSQL(sql, ConstantDef.DORIS_DEFAULT_NS, databaseEntity.getName(), clusterInfo);
            List<List<String>> datas = queryResp.getData();
            //TODO:There is only one borker by default
            List<String> data = datas.get(0);
            brokerName = data.get(0);
            log.debug("Get hdfs broker {}.", brokerName);
        } catch (Exception e) {
            // SLF4J: pass the Throwable as the last argument (no placeholder) so the stack trace is logged.
            log.error("Get broker exception.", e);
            throw new UnknownException("The Palo cluster no have broker, please install.");
        }

        HdfsFilePreviewReq previewReq = new HdfsFilePreviewReq();
        // TODO:Currently only HDFS is supported
        // Local, single-threaded building — StringBuilder, not the synchronized StringBuffer.
        StringBuilder fileUrlBuffer = new StringBuilder();
        fileUrlBuffer.append("hdfs://");
        fileUrlBuffer.append(info.getHost());
        fileUrlBuffer.append(":");
        fileUrlBuffer.append(info.getPort());
        fileUrlBuffer.append(info.getFileUrl());

        HdfsFilePreviewReq.FileInfo fileInfo = new HdfsFilePreviewReq.FileInfo();
        fileInfo.setColumnSeparator(info.getColumnSeparator());
        fileInfo.setFileUrl(fileUrlBuffer.toString());
        fileInfo.setFormat(info.getFormat().name());
        previewReq.setFileInfo(fileInfo);

        if (brokerName != null) {
            log.debug("broker not null, add broker {} Props.", brokerName);
            HdfsFilePreviewReq.ConnectInfo connectInfo = new HdfsFilePreviewReq.ConnectInfo();
            connectInfo.setBrokerName(brokerName);

            // TODO:Currently HDFS does not require a password
            Map<String, String> brokerProps = Maps.newHashMap();
            brokerProps.put("username", "");
            brokerProps.put("password", "");
            connectInfo.setBrokerProps(brokerProps);
            previewReq.setConnectInfo(connectInfo);
        }

        // request Doris
        HdfsFilePreview filePreview = fileUploadClient.getHdfsPreview(previewReq, clusterInfo);

        HdfsPreviewResp resp = new HdfsPreviewResp();
        resp.setPeviewStatistic(filePreview.getReviewStatistic());
        resp.setFileSample(filePreview.getFileSample());
        resp.setConnectInfo(previewReq.getConnectInfo());
        resp.setFileInfo(previewReq.getFileInfo());
        log.debug("Get hdfs file preview success.");
        return resp;
    }